From b207bb764fad0f5a602c1040f26b2c00bbfdd0db Mon Sep 17 00:00:00 2001 From: Jian Qiu Date: Mon, 7 Nov 2022 15:33:15 +0800 Subject: [PATCH] use ocm-0.9 Signed-off-by: Jian Qiu --- controlplane/Makefile | 4 + ...gement.io_clustermanagementaddons.crd.yaml | 31 +- ...-management.io_managedclustersets.crd.yaml | 94 ++- ...anagement.io_managedclusteraddons.crd.yaml | 54 +- ...ment.io_managedclustersetbindings.crd.yaml | 60 +- ...anagement.io_appliedmanifestworks.crd.yaml | 81 -- ...agement.io_addondeploymentconfigs.crd.yaml | 93 +++ ...uster-management.io_clusterclaims.crd.yaml | 45 -- ...-cluster-management.io_placements.crd.yaml | 247 ------- ...-management.io_placementdecisions.crd.yaml | 42 -- ...anagement.io_appliedmanifestworks.crd.yaml | 83 --- controlplane/config/crds/bootstrap.go | 5 +- controlplane/go.mod | 18 +- controlplane/go.sum | 34 +- controlplane/hack/crd-update/copy-crds.sh | 9 + controlplane/hack/crd-update/init.sh | 15 + .../pkg/apiserver/kubeapiserver/aggregator.go | 27 +- .../ocmcontroller/ocmcontroller.go | 21 +- .../managedclustermutating/admission.go | 28 +- .../admission.go | 48 +- .../managedclustervalidating/admission.go | 49 +- .../google/gnostic/jsonschema/display.go | 17 +- .../google/gnostic/jsonschema/models.go | 8 +- .../google/gnostic/jsonschema/reader.go | 1 - .../google/gnostic/jsonschema/writer.go | 30 +- .../google/gnostic/openapiv2/OpenAPIv2.go | 7 +- .../google/gnostic/openapiv3/OpenAPIv3.go | 7 +- .../google/gnostic/openapiv3/OpenAPIv3.pb.go | 13 +- .../google/gnostic/openapiv3/OpenAPIv3.proto | 2 +- .../google/gnostic/openapiv3/README.md | 4 + .../gnostic/openapiv3/annotations.pb.go | 183 +++++ .../gnostic/openapiv3/annotations.proto | 60 ++ .../vendor/github.com/imdario/mergo/README.md | 32 +- .../vendor/github.com/imdario/mergo/merge.go | 2 +- .../vendor/github.com/imdario/mergo/mergo.go | 4 +- .../prometheus/collectors/collectors.go | 40 + .../collectors/dbstats_collector.go | 119 +++ 
.../prometheus/collectors/expvar_collector.go | 57 ++ .../collectors/go_collector_go116.go | 49 ++ .../collectors/go_collector_latest.go | 160 ++++ .../collectors/process_collector.go | 56 ++ .../vendor/gomodules.xyz/jsonpatch/v2/LICENSE | 202 +++++ .../gomodules.xyz/jsonpatch/v2/jsonpatch.go | 334 +++++++++ controlplane/vendor/modules.txt | 83 ++- ...gement.io_clustermanagementaddons.crd.yaml | 31 +- ...anagement.io_managedclusteraddons.crd.yaml | 54 +- ...agement.io_addondeploymentconfigs.crd.yaml | 93 +++ .../api/addon/v1alpha1/register.go | 2 + .../v1alpha1/types_addondeploymentconfig.go | 80 ++ .../v1alpha1/types_clustermanagementaddon.go | 60 +- .../v1alpha1/types_managedclusteraddon.go | 44 +- .../addon/v1alpha1/zz_generated.deepcopy.go | 244 +++++- .../zz_generated.swagger_doc_generated.go | 93 ++- .../typed/addon/v1alpha1/addon_client.go | 5 + .../addon/v1alpha1/addondeploymentconfig.go | 162 ++++ .../addon/v1alpha1/generated_expansion.go | 2 + .../addon/v1alpha1/addondeploymentconfig.go | 74 ++ .../addon/v1alpha1/interface.go | 7 + .../informers/externalversions/generic.go | 2 + .../addon/v1alpha1/addondeploymentconfig.go | 83 +++ .../addon/v1alpha1/expansion_generated.go | 8 + .../cluster/clientset/versioned/clientset.go | 13 + .../clientset/versioned/scheme/register.go | 2 + .../typed/cluster/v1alpha1/cluster_client.go | 20 - .../cluster/v1alpha1/generated_expansion.go | 8 - .../typed/cluster/v1alpha1/placement.go | 179 ----- .../cluster/v1alpha1/placementdecision.go | 179 ----- .../typed/cluster/v1beta2/cluster_client.go | 96 +++ .../versioned/typed/cluster/v1beta2/doc.go | 4 + .../cluster/v1beta2/generated_expansion.go | 7 + .../managedclusterset.go | 42 +- .../managedclustersetbinding.go | 53 +- .../externalversions/cluster/interface.go | 8 + .../cluster/v1alpha1/interface.go | 28 - .../cluster/v1alpha1/placement.go | 74 -- .../cluster/v1alpha1/placementdecision.go | 74 -- .../cluster/v1beta2/interface.go | 36 + .../managedclusterset.go | 20 +- 
.../managedclustersetbinding.go | 20 +- .../informers/externalversions/generic.go | 15 +- .../cluster/v1alpha1/expansion_generated.go | 28 - .../listers/cluster/v1alpha1/placement.go | 83 --- .../cluster/v1alpha1/placementdecision.go | 83 --- .../cluster/v1beta2/expansion_generated.go | 15 + .../managedclusterset.go | 18 +- .../managedclustersetbinding.go | 24 +- ...-management.io_managedclustersets.crd.yaml | 92 --- ...ment.io_managedclustersetbindings.crd.yaml | 47 -- ...-cluster-management.io_placements.crd.yaml | 265 ------- ...-management.io_placementdecisions.crd.yaml | 60 -- .../api/cluster/v1alpha1/register.go | 8 - .../api/cluster/v1alpha1/types.go | 466 ------------ .../cluster/v1alpha1/zz_generated.deepcopy.go | 558 -------------- .../zz_generated.swagger_doc_generated.go | 227 ------ ...-management.io_managedclustersets.crd.yaml | 71 -- ...ment.io_managedclustersetbindings.crd.yaml | 26 - ...-cluster-management.io_placements.crd.yaml | 247 ------- ...-management.io_placementdecisions.crd.yaml | 42 -- .../api/cluster/v1beta1/types_placement.go | 7 + ...-management.io_managedclustersets.crd.yaml | 271 +++++++ ...ment.io_managedclustersetbindings.crd.yaml | 173 +++++ .../api/cluster/v1beta2/doc.go | 9 + .../api/cluster/v1beta2/helpers.go | 110 +++ .../api/cluster/v1beta2/register.go | 40 + .../v1beta2/types_managedclusterset.go | 99 +++ .../v1beta2/types_managedclustersetbinding.go | 65 ++ .../cluster/v1beta2/zz_generated.deepcopy.go | 233 ++++++ .../zz_generated.swagger_doc_generated.go | 100 +++ .../api/feature/feature.go | 11 + .../placement/LICENSE | 201 +++++ .../pkg/controllers/framework/interface.go | 100 +++ .../placement/pkg/controllers/manager.go | 91 +++ .../scheduling/cluster_event_handler.go | 105 +++ .../scheduling/clusterset_event_handler.go | 95 +++ .../clustersetbinding_event_handler.go | 90 +++ .../pkg/controllers/scheduling/schedule.go | 430 +++++++++++ .../scheduling/scheduling_controller.go | 667 +++++++++++++++++ 
.../placement/pkg/debugger/debugger.go | 83 +++ .../placement/pkg/plugins/addon/addon.go | 115 +++ .../placement/pkg/plugins/balance/balance.go | 90 +++ .../placement/pkg/plugins/interface.go | 91 +++ .../pkg/plugins/predicate/predicate.go | 137 ++++ .../pkg/plugins/resource/resource.go | 164 +++++ .../placement/pkg/plugins/steady/steady.go | 92 +++ .../tainttoleration/taint_toleration.go | 261 +++++++ .../pkg/hub/addon/discovery_controller.go | 4 + .../pkg/hub/addon/healthcheck_controller.go | 5 +- ...nagedcluster-registration-clusterrole.yaml | 4 +- .../pkg/hub/managedcluster/controller.go | 5 +- .../pkg/hub/managedclusterset/controller.go | 16 +- .../managedclustersetbinding/controller.go | 2 +- .../pkg/webhook/cluster/mutating_webhook.go | 253 ------- .../pkg/webhook/cluster/validating_webhook.go | 295 -------- .../clustersetbinding/validating_webhook.go | 134 ---- .../pkg/webhook/v1/managedcluster_mutating.go | 125 ++++ .../webhook/v1/managedcluster_validating.go | 233 ++++++ .../registration/pkg/webhook/v1/webhook.go | 33 + .../v1beta1/managedclusterset_conversion.go | 4 + .../managedclustersetbinding_validating.go | 99 +++ .../pkg/webhook/v1beta1/webhook.go | 68 ++ .../v1beta2/managedclusterset_conversion.go | 50 ++ .../managedclustersetbinding_validating.go | 54 ++ .../pkg/webhook/v1beta2/webhook.go | 60 ++ .../sigs.k8s.io/controller-runtime/.gitignore | 24 + .../controller-runtime/.golangci.yml | 141 ++++ .../controller-runtime/CONTRIBUTING.md | 19 + .../sigs.k8s.io/controller-runtime/FAQ.md | 81 ++ .../sigs.k8s.io/controller-runtime/LICENSE | 201 +++++ .../sigs.k8s.io/controller-runtime/Makefile | 123 ++++ .../sigs.k8s.io/controller-runtime/OWNERS | 10 + .../controller-runtime/OWNERS_ALIASES | 41 ++ .../sigs.k8s.io/controller-runtime/README.md | 66 ++ .../controller-runtime/SECURITY_CONTACTS | 14 + .../controller-runtime/TMP-LOGGING.md | 169 +++++ .../controller-runtime/VERSIONING.md | 30 + .../sigs.k8s.io/controller-runtime/alias.go | 150 ++++ 
.../controller-runtime/code-of-conduct.md | 3 + .../sigs.k8s.io/controller-runtime/doc.go | 127 ++++ .../pkg/builder/controller.go | 335 +++++++++ .../controller-runtime/pkg/builder/doc.go | 28 + .../controller-runtime/pkg/builder/options.go | 140 ++++ .../controller-runtime/pkg/builder/webhook.go | 209 ++++++ .../controller-runtime/pkg/cache/cache.go | 275 +++++++ .../controller-runtime/pkg/cache/doc.go | 19 + .../pkg/cache/informer_cache.go | 217 ++++++ .../pkg/cache/internal/cache_reader.go | 218 ++++++ .../pkg/cache/internal/deleg_map.go | 126 ++++ .../pkg/cache/internal/disabledeepcopy.go | 35 + .../pkg/cache/internal/informers_map.go | 480 ++++++++++++ .../pkg/cache/internal/selector.go | 54 ++ .../pkg/cache/internal/transformers.go | 50 ++ .../pkg/cache/multi_namespace_cache.go | 331 +++++++++ .../pkg/certwatcher/certwatcher.go | 166 +++++ .../controller-runtime/pkg/certwatcher/doc.go | 23 + .../pkg/certwatcher/metrics/metrics.go | 45 ++ .../pkg/client/apiutil/apimachinery.go | 196 +++++ .../pkg/client/apiutil/dynamicrestmapper.go | 294 ++++++++ .../controller-runtime/pkg/client/client.go | 328 +++++++++ .../pkg/client/client_cache.go | 150 ++++ .../controller-runtime/pkg/client/codec.go | 40 + .../pkg/client/config/config.go | 157 ++++ .../pkg/client/config/doc.go | 18 + .../controller-runtime/pkg/client/doc.go | 49 ++ .../controller-runtime/pkg/client/dryrun.go | 106 +++ .../pkg/client/interfaces.go | 145 ++++ .../pkg/client/metadata_client.go | 193 +++++ .../pkg/client/namespaced_client.go | 213 ++++++ .../controller-runtime/pkg/client/object.go | 77 ++ .../controller-runtime/pkg/client/options.go | 697 ++++++++++++++++++ .../controller-runtime/pkg/client/patch.go | 213 ++++++ .../controller-runtime/pkg/client/split.go | 141 ++++ .../pkg/client/typed_client.go | 205 ++++++ .../pkg/client/unstructured_client.go | 271 +++++++ .../controller-runtime/pkg/client/watch.go | 114 +++ .../controller-runtime/pkg/cluster/cluster.go | 270 +++++++ 
.../pkg/cluster/internal.go | 128 ++++ .../controller-runtime/pkg/config/config.go | 112 +++ .../controller-runtime/pkg/config/doc.go | 25 + .../pkg/config/v1alpha1/doc.go | 20 + .../pkg/config/v1alpha1/register.go | 37 + .../pkg/config/v1alpha1/types.go | 157 ++++ .../config/v1alpha1/zz_generated.deepcopy.go | 153 ++++ .../pkg/controller/controller.go | 155 ++++ .../controllerutil/controllerutil.go | 394 ++++++++++ .../pkg/controller/controllerutil/doc.go | 20 + .../controller-runtime/pkg/controller/doc.go | 25 + .../pkg/conversion/conversion.go | 40 + .../controller-runtime/pkg/event/doc.go | 28 + .../controller-runtime/pkg/event/event.go | 55 ++ .../controller-runtime/pkg/handler/doc.go | 38 + .../controller-runtime/pkg/handler/enqueue.go | 90 +++ .../pkg/handler/enqueue_mapped.go | 97 +++ .../pkg/handler/enqueue_owner.go | 189 +++++ .../pkg/handler/eventhandler.go | 104 +++ .../controller-runtime/pkg/healthz/doc.go | 32 + .../controller-runtime/pkg/healthz/healthz.go | 206 ++++++ .../pkg/internal/controller/controller.go | 360 +++++++++ .../internal/controller/metrics/metrics.go | 78 ++ .../pkg/internal/httpserver/server.go | 16 + .../pkg/internal/log/log.go | 32 + .../pkg/internal/objectutil/objectutil.go | 78 ++ .../pkg/internal/recorder/recorder.go | 176 +++++ .../pkg/leaderelection/doc.go | 24 + .../pkg/leaderelection/leader_election.go | 127 ++++ .../controller-runtime/pkg/log/deleg.go | 188 +++++ .../controller-runtime/pkg/log/log.go | 102 +++ .../controller-runtime/pkg/log/null.go | 59 ++ .../pkg/log/warning_handler.go | 76 ++ .../controller-runtime/pkg/manager/doc.go | 21 + .../pkg/manager/internal.go | 652 ++++++++++++++++ .../controller-runtime/pkg/manager/manager.go | 620 ++++++++++++++++ .../pkg/manager/runnable_group.go | 297 ++++++++ .../pkg/manager/signals/doc.go | 20 + .../pkg/manager/signals/signal.go | 45 ++ .../pkg/manager/signals/signal_posix.go | 27 + .../pkg/manager/signals/signal_windows.go | 23 + .../pkg/metrics/client_go_adapter.go | 
231 ++++++ .../controller-runtime/pkg/metrics/doc.go | 20 + .../pkg/metrics/listener.go | 52 ++ .../pkg/metrics/registry.go | 30 + .../pkg/metrics/workqueue.go | 130 ++++ .../controller-runtime/pkg/predicate/doc.go | 20 + .../pkg/predicate/predicate.go | 352 +++++++++ .../controller-runtime/pkg/ratelimiter/doc.go | 22 + .../pkg/ratelimiter/ratelimiter.go | 30 + .../controller-runtime/pkg/reconcile/doc.go | 21 + .../pkg/reconcile/reconcile.go | 102 +++ .../pkg/recorder/recorder.go | 31 + .../pkg/runtime/inject/doc.go | 22 + .../pkg/runtime/inject/inject.go | 164 +++++ .../controller-runtime/pkg/scheme/scheme.go | 94 +++ .../controller-runtime/pkg/source/doc.go | 22 + .../pkg/source/internal/eventsource.go | 138 ++++ .../controller-runtime/pkg/source/source.go | 375 ++++++++++ .../pkg/webhook/admission/decode.go | 72 ++ .../pkg/webhook/admission/defaulter.go | 74 ++ .../pkg/webhook/admission/defaulter_custom.go | 86 +++ .../pkg/webhook/admission/doc.go | 28 + .../pkg/webhook/admission/http.go | 153 ++++ .../pkg/webhook/admission/inject.go | 31 + .../pkg/webhook/admission/multi.go | 147 ++++ .../pkg/webhook/admission/response.go | 121 +++ .../pkg/webhook/admission/validator.go | 122 +++ .../pkg/webhook/admission/validator_custom.go | 113 +++ .../pkg/webhook/admission/webhook.go | 273 +++++++ .../controller-runtime/pkg/webhook/alias.go | 79 ++ .../pkg/webhook/conversion/conversion.go | 345 +++++++++ .../pkg/webhook/conversion/decoder.go | 47 ++ .../controller-runtime/pkg/webhook/doc.go | 28 + .../pkg/webhook/internal/metrics/metrics.go | 85 +++ .../controller-runtime/pkg/webhook/server.go | 345 +++++++++ 271 files changed, 24813 insertions(+), 4359 deletions(-) delete mode 100644 controlplane/config/crds/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml create mode 100644 controlplane/config/crds/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml delete mode 100644 
controlplane/config/crds/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml delete mode 100644 controlplane/config/crds/0001_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml create mode 100755 controlplane/hack/crd-update/copy-crds.sh create mode 100644 controlplane/hack/crd-update/init.sh create mode 100644 controlplane/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go create mode 100644 controlplane/vendor/github.com/google/gnostic/openapiv3/annotations.proto create mode 100644 controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go create mode 100644 controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go create mode 100644 controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go create mode 100644 controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go create mode 100644 controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go create mode 100644 controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go create mode 100644 controlplane/vendor/gomodules.xyz/jsonpatch/v2/LICENSE create mode 100644 controlplane/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml create mode 100644 controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addondeploymentconfig.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addondeploymentconfig.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1/addondeploymentconfig.go create mode 100644 
controlplane/vendor/open-cluster-management.io/api/client/addon/listers/addon/v1alpha1/addondeploymentconfig.go delete mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/placement.go delete mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/placementdecision.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/cluster_client.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/doc.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/generated_expansion.go rename controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/{v1alpha1 => v1beta2}/managedclusterset.go (76%) rename controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/{v1alpha1 => v1beta2}/managedclustersetbinding.go (68%) delete mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/placement.go delete mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/placementdecision.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/interface.go rename controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/{v1alpha1 => v1beta2}/managedclusterset.go (79%) rename controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/{v1alpha1 => v1beta2}/managedclustersetbinding.go (78%) delete mode 100644 
controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/placement.go delete mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/placementdecision.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/expansion_generated.go rename controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/{v1alpha1 => v1beta2}/managedclusterset.go (71%) rename controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/{v1alpha1 => v1beta2}/managedclustersetbinding.go (79%) delete mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml delete mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml delete mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_03_clusters.open-cluster-management.io_placements.crd.yaml delete mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml create mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml create mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml create mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/doc.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/helpers.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/register.go create mode 100644 
controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/types_managedclusterset.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/types_managedclustersetbinding.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/zz_generated.deepcopy.go create mode 100644 controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/zz_generated.swagger_doc_generated.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/LICENSE create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/framework/interface.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/manager.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/cluster_event_handler.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/clusterset_event_handler.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/clustersetbinding_event_handler.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/schedule.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/scheduling_controller.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/debugger/debugger.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/addon/addon.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/balance/balance.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/interface.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/predicate/predicate.go create mode 100644 
controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/resource/resource.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/steady/steady.go create mode 100644 controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/tainttoleration/taint_toleration.go delete mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/cluster/mutating_webhook.go delete mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/cluster/validating_webhook.go delete mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/clustersetbinding/validating_webhook.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/managedcluster_mutating.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/managedcluster_validating.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/webhook.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/managedclusterset_conversion.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/managedclustersetbinding_validating.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/webhook.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/managedclusterset_conversion.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/managedclustersetbinding_validating.go create mode 100644 controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/webhook.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/.gitignore create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/.golangci.yml create mode 100644 
controlplane/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/FAQ.md create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/LICENSE create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/Makefile create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/OWNERS create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/README.md create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/alias.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go create mode 100644 
controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go create mode 100644 
controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go create mode 100644 
controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/httpserver/server.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go create mode 100644 
controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go create mode 100644 
controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go create 
mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go create mode 100644 controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go diff --git a/controlplane/Makefile b/controlplane/Makefile index 356edd6c4..fac18ac35 100644 --- a/controlplane/Makefile +++ b/controlplane/Makefile @@ -25,3 +25,7 @@ vendor: go mod tidy go mod vendor .PHONY: vendor + +update: + bash -x hack/crd-update/copy-crds.sh +.PHONY: update \ No newline at end of file diff --git a/controlplane/config/crds/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml b/controlplane/config/crds/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml index 6326a7b48..08bdb4056 100644 --- a/controlplane/config/crds/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml +++ b/controlplane/config/crds/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml @@ -38,7 +38,7 @@ spec: type: object properties: addOnConfiguration: - description: addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration. + description: 'Deprecated: Use supportedConfigs filed instead addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration.' type: object properties: crName: @@ -61,6 +61,35 @@ spec: displayName: description: displayName represents the name of add-on that will be displayed. 
type: string + supportedConfigs: + description: supportedConfigs is a list of configuration types supported by add-on. An empty list means the add-on does not require configurations. The default is an empty list + type: array + items: + description: ConfigMeta represents a collection of metadata information for add-on configuration. + type: object + required: + - resource + properties: + defaultConfig: + description: defaultConfig represents the namespace and name of the default add-on configuration. In scenario where all add-ons have a same configuration. + type: object + required: + - name + properties: + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + group: + description: group of the add-on configuration. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 status: description: status represents the current status of cluster management add-on. 
type: object diff --git a/controlplane/config/crds/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml b/controlplane/config/crds/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml index a898cc89c..8eeb1233f 100644 --- a/controlplane/config/crds/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml +++ b/controlplane/config/crds/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml @@ -15,11 +15,22 @@ spec: scope: Cluster preserveUnknownFields: false versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status + name: Empty + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 deprecated: true - deprecationWarning: "cluster.open-cluster-management.io/v1alpha1 ManagedClusterSet is deprecated; use cluster.open-cluster-management.io/v1beta1 ManagedClusterSet" - schema: - openAPIV3Schema: + deprecationWarning: "cluster.open-cluster-management.io/v1beta1 ManagedClusterSet is deprecated; use cluster.open-cluster-management.io/v1beta2 ManagedClusterSet" + served: true + storage: true + subresources: + status: {} + "schema": + "openAPIV3Schema": description: "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean: 1. The workload can run on any ManagedCluster in the ManagedClusterSet 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. 
User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission on both the old and new ManagedClusterSet." type: object properties: @@ -34,6 +45,53 @@ spec: spec: description: Spec defines the attributes of the ManagedClusterSet type: object + default: + clusterSelector: + selectorType: LegacyClusterSetLabel + properties: + clusterSelector: + description: ClusterSelector represents a selector of ManagedClusters + type: object + default: + selectorType: LegacyClusterSetLabel + properties: + labelSelector: + description: LabelSelector define the general labelSelector which clusterset will use to select target managedClusters + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + selectorType: + description: SelectorType could only be "LegacyClusterSetLabel" or "LabelSelector" "LegacyClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. "LabelSelector" means use labelSelector to select target managedClusters + type: string + default: LegacyClusterSetLabel + enum: + - LegacyClusterSetLabel + - LabelSelector status: description: Status represents the current status of the ManagedClusterSet type: object @@ -82,10 +140,6 @@ spec: type: string maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status name: Empty @@ -93,9 +147,13 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1beta1 - schema: - openAPIV3Schema: + name: v1beta2 + served: true + storage: false + subresources: + status: {} + "schema": + "openAPIV3Schema": description: "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean: 1. The workload can run on any ManagedCluster in the ManagedClusterSet 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. 
In order to update this label, user must have the permission on both the old and new ManagedClusterSet." type: object properties: @@ -112,13 +170,13 @@ spec: type: object default: clusterSelector: - selectorType: LegacyClusterSetLabel + selectorType: ExclusiveClusterSetLabel properties: clusterSelector: description: ClusterSelector represents a selector of ManagedClusters type: object default: - selectorType: LegacyClusterSetLabel + selectorType: ExclusiveClusterSetLabel properties: labelSelector: description: LabelSelector define the general labelSelector which clusterset will use to select target managedClusters @@ -151,11 +209,11 @@ spec: additionalProperties: type: string selectorType: - description: SelectorType could only be "LegacyClusterSetLabel" or "LabelSelector" "LegacyClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. "LabelSelector" means use labelSelector to select target managedClusters + description: SelectorType could only be "ExclusiveClusterSetLabel" or "LabelSelector" "ExclusiveClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. 
"LabelSelector" means use labelSelector to select target managedClusters type: string - default: LegacyClusterSetLabel + default: ExclusiveClusterSetLabel enum: - - LegacyClusterSetLabel + - ExclusiveClusterSetLabel - LabelSelector status: description: Status represents the current status of the ManagedClusterSet @@ -205,10 +263,6 @@ spec: type: string maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - served: true - storage: true - subresources: - status: {} status: acceptedNames: kind: "" diff --git a/controlplane/config/crds/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml b/controlplane/config/crds/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml index 614048d1e..9d12081b7 100644 --- a/controlplane/config/crds/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml +++ b/controlplane/config/crds/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml @@ -42,9 +42,33 @@ spec: description: spec holds configuration that could apply to any operator. type: object properties: + configs: + description: configs is a list of add-on configurations. In scenario where the current add-on has its own configurations. An empty list means there are no defautl configurations for add-on. The default is an empty list + type: array + items: + type: object + required: + - name + - resource + properties: + group: + description: group of the add-on configuration. + type: string + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 installNamespace: description: installNamespace is the namespace on the managed cluster to install the addon agent. 
If it is not set, open-cluster-management-agent-addon namespace is used to install the addon agent. type: string + default: open-cluster-management-agent-addon maxLength: 63 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ status: @@ -52,7 +76,7 @@ spec: type: object properties: addOnConfiguration: - description: addOnConfiguration is a reference to configuration information for the add-on. This resource is use to locate the configuration resource for the add-on. + description: 'Deprecated: Use configReference instead addOnConfiguration is a reference to configuration information for the add-on. This resource is use to locate the configuration resource for the add-on.' type: object properties: crName: @@ -119,6 +143,34 @@ spec: type: string maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + configReferences: + description: configReferences is a list of current add-on configuration references. This will be overridden by the clustermanagementaddon configuration references. + type: array + items: + description: ConfigReference is a reference to the current add-on configuration. This resource is used to locate the configuration resource for the current add-on. + type: object + required: + - name + - resource + properties: + group: + description: group of the add-on configuration. + type: string + lastObservedGeneration: + description: lastObservedGeneration is the observed generation of the add-on configuration. + type: integer + format: int64 + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 healthCheck: description: healthCheck indicates how to check the healthiness status of the current addon. 
It should be set by each addon implementation, by default, the lease mode will be used. type: object diff --git a/controlplane/config/crds/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml b/controlplane/config/crds/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml index 86f1c49dd..bf35bb406 100644 --- a/controlplane/config/crds/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml +++ b/controlplane/config/crds/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml @@ -15,9 +15,9 @@ spec: scope: Namespaced preserveUnknownFields: false versions: - - name: v1alpha1 + - name: v1beta1 deprecated: true - deprecationWarning: "cluster.open-cluster-management.io/v1alpha1 ManagedClusterSetBinding is deprecated; use cluster.open-cluster-management.io/v1beta1 ManagedClusterSetBinding" + deprecationWarning: "cluster.open-cluster-management.io/v1beta1 ManagedClusterSetBinding is deprecated; use cluster.open-cluster-management.io/v1beta2 ManagedClusterSetBinding" schema: openAPIV3Schema: description: ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers. @@ -39,9 +39,59 @@ spec: description: ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. 
type: string minLength: 1 + status: + description: Status represents the current status of the ManagedClusterSetBinding + type: object + properties: + conditions: + description: Conditions contains the different condition statuses for this ManagedClusterSetBinding. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. 
Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ served: true - storage: false - - name: v1beta1 + storage: true + subresources: + status: {} + - name: v1beta2 schema: openAPIV3Schema: description: ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers. 
@@ -112,7 +162,7 @@ spec: maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ served: true - storage: true + storage: false subresources: status: {} status: diff --git a/controlplane/config/crds/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml b/controlplane/config/crds/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml deleted file mode 100644 index 892fd392e..000000000 --- a/controlplane/config/crds/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml +++ /dev/null @@ -1,81 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: appliedmanifestworks.work.open-cluster-management.io -spec: - group: work.open-cluster-management.io - names: - kind: AppliedManifestWork - listKind: AppliedManifestWorkList - plural: appliedmanifestworks - singular: appliedmanifestwork - scope: Cluster - preserveUnknownFields: false - versions: - - name: v1 - schema: - openAPIV3Schema: - description: AppliedManifestWork represents an applied manifestwork on managed cluster that is placed on a managed cluster. An AppliedManifestWork links to a manifestwork on a hub recording resources deployed in the managed cluster. When the agent is removed from managed cluster, cluster-admin on managed cluster can delete appliedmanifestwork to remove resources deployed by the agent. The name of the appliedmanifestwork must be in the format of {hash of hub's first kube-apiserver url}-{manifestwork name} - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec represents the desired configuration of AppliedManifestWork. - type: object - properties: - hubHash: - description: HubHash represents the hash of the first hub kube apiserver to identify which hub this AppliedManifestWork links to. - type: string - manifestWorkName: - description: ManifestWorkName represents the name of the related manifestwork on the hub. - type: string - status: - description: Status represents the current status of AppliedManifestWork. - type: object - properties: - appliedResources: - description: AppliedResources represents a list of resources defined within the manifestwork that are applied. Only resources with valid GroupVersionResource, namespace, and name are suitable. An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. The resource relating to the item will also be removed from managed cluster. The deleted resource may still be present until the finalizers for that resource are finished. However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved. - type: array - items: - description: AppliedManifestResourceMeta represents the group, version, resource, name and namespace of a resource. Since these resources have been created, they must have valid group, version, resource, namespace, and name. 
- type: object - required: - - name - - resource - - version - properties: - group: - description: Group is the API Group of the Kubernetes resource, empty string indicates it is in core group. - type: string - name: - description: Name is the name of the Kubernetes resource. - type: string - namespace: - description: Name is the namespace of the Kubernetes resource, empty string indicates it is a cluster scoped resource. - type: string - resource: - description: Resource is the resource name of the Kubernetes resource. - type: string - uid: - description: UID is set on successful deletion of the Kubernetes resource by controller. The resource might be still visible on the managed cluster after this field is set. It is not directly settable by a client. - type: string - version: - description: Version is the version of the Kubernetes resource. - type: string - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/config/crds/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml b/controlplane/config/crds/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml new file mode 100644 index 000000000..0707f403c --- /dev/null +++ b/controlplane/config/crds/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml @@ -0,0 +1,93 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: addondeploymentconfigs.addon.open-cluster-management.io +spec: + group: addon.open-cluster-management.io + names: + kind: AddOnDeploymentConfig + listKind: AddOnDeploymentConfigList + plural: addondeploymentconfigs + singular: addondeploymentconfig + scope: Namespaced + preserveUnknownFields: false + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: AddOnDeploymentConfig represents a deployment configuration for an add-on. 
+ type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec represents a desired configuration for an add-on. + type: object + properties: + customizedVariables: + description: CustomizedVariables is a list of name-value variables for the current add-on deployment. The add-on implementation can use these variables to render its add-on deployment. The default is an empty list. + type: array + items: + description: CustomizedVariable represents a customized variable for add-on deployment. + type: object + required: + - name + properties: + name: + description: Name of this variable. + type: string + maxLength: 255 + pattern: ^[a-zA-Z_][_a-zA-Z0-9]*$ + value: + description: Value of this variable. + type: string + maxLength: 1024 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodePlacement: + description: NodePlacement enables explicit control over the scheduling of the add-on agents on the managed cluster. All add-on agent pods are expected to comply with this node placement. If the placement is nil, the placement is not specified, it will be omitted. If the placement is an empty object, the placement will match all nodes and tolerate nothing. + type: object + properties: + nodeSelector: + description: NodeSelector defines which Nodes the Pods are scheduled on. 
If the selector is an empty list, it will match all nodes. The default is an empty list. + type: object + additionalProperties: + type: string + tolerations: + description: Tolerations is attached by pods to tolerate any taint that matches the triple using the matching operator . If the tolerations is an empty list, it will tolerate nothing. The default is an empty list. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/controlplane/config/crds/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml b/controlplane/config/crds/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml deleted file mode 100644 index 0f5b20f73..000000000 --- a/controlplane/config/crds/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: clusterclaims.cluster.open-cluster-management.io -spec: - group: cluster.open-cluster-management.io - names: - kind: ClusterClaim - listKind: ClusterClaimList - plural: clusterclaims - singular: clusterclaim - scope: Cluster - preserveUnknownFields: false - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: ClusterClaim represents cluster information that a managed cluster claims ClusterClaims with well known names include, 1. id.k8s.io, it contains a unique identifier for the cluster. 2. clusterset.k8s.io, it contains an identifier that relates the cluster to the ClusterSet in which it belongs. ClusterClaims created on a managed cluster will be collected and saved into the status of the corresponding ManagedCluster on hub. - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of the ClusterClaim. - type: object - properties: - value: - description: Value is a claim-dependent string - type: string - maxLength: 1024 - minLength: 1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/config/crds/0000_02_clusters.open-cluster-management.io_placements.crd.yaml b/controlplane/config/crds/0000_02_clusters.open-cluster-management.io_placements.crd.yaml index 639d34cc5..4c22187b6 100644 --- a/controlplane/config/crds/0000_02_clusters.open-cluster-management.io_placements.crd.yaml +++ b/controlplane/config/crds/0000_02_clusters.open-cluster-management.io_placements.crd.yaml @@ -12,253 +12,6 @@ spec: scope: Namespaced preserveUnknownFields: false versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].status - name: Succeeded - type: string - - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].reason - name: Reason - type: string - - jsonPath: .status.numberOfSelectedClusters - name: SelectedClusters - type: integer - name: v1alpha1 - deprecated: true - deprecationWarning: "cluster.open-cluster-management.io/v1alpha1 Placement is deprecated; use cluster.open-cluster-management.io/v1beta1 Placement" - schema: - openAPIV3Schema: - description: "Placement defines a rule to select a set of ManagedClusters from the ManagedClusterSets bound to the placement namespace. 
\n Here is how the placement policy combines with other selection methods to determine a matching list of ManagedClusters: 1) Kubernetes clusters are registered with hub as cluster-scoped ManagedClusters; 2) ManagedClusters are organized into cluster-scoped ManagedClusterSets; 3) ManagedClusterSets are bound to workload namespaces; 4) Namespace-scoped Placements specify a slice of ManagedClusterSets which select a working set of potential ManagedClusters; 5) Then Placements subselect from that working set using label/claim selection. \n No ManagedCluster will be selected if no ManagedClusterSet is bound to the placement namespace. User is able to bind a ManagedClusterSet to a namespace by creating a ManagedClusterSetBinding in that namespace if they have a RBAC rule to CREATE on the virtual subresource of `managedclustersets/bind`. \n A slice of PlacementDecisions with label cluster.open-cluster-management.io/placement={placement name} will be created to represent the ManagedClusters selected by this placement. \n If a ManagedCluster is selected and added into the PlacementDecisions, other components may apply workload on it; once it is removed from the PlacementDecisions, the workload applied on this ManagedCluster should be evicted accordingly." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of Placement. - type: object - properties: - clusterSets: - description: ClusterSets represent the ManagedClusterSets from which the ManagedClusters are selected. If the slice is empty, ManagedClusters will be selected from the ManagedClusterSets bound to the placement namespace, otherwise ManagedClusters will be selected from the intersection of this slice and the ManagedClusterSets bound to the placement namespace. - type: array - items: - type: string - numberOfClusters: - description: NumberOfClusters represents the desired number of ManagedClusters to be selected which meet the placement requirements. 1) If not specified, all ManagedClusters which meet the placement requirements (including ClusterSets, and Predicates) will be selected; 2) Otherwise if the nubmer of ManagedClusters meet the placement requirements is larger than NumberOfClusters, a random subset with desired number of ManagedClusters will be selected; 3) If the nubmer of ManagedClusters meet the placement requirements is equal to NumberOfClusters, all of them will be selected; 4) If the nubmer of ManagedClusters meet the placement requirements is less than NumberOfClusters, all of them will be selected, and the status of condition `PlacementConditionSatisfied` will be set to false; - type: integer - format: int32 - predicates: - description: Predicates represent a slice of predicates to select ManagedClusters. The predicates are ORed. - type: array - items: - description: ClusterPredicate represents a predicate to select ManagedClusters. - type: object - properties: - requiredClusterSelector: - description: RequiredClusterSelector represents a selector of ManagedClusters by label and claim. 
If specified, 1) Any ManagedCluster, which does not match the selector, should not be selected by this ClusterPredicate; 2) If a selected ManagedCluster (of this ClusterPredicate) ceases to match the selector (e.g. due to an update) of any ClusterPredicate, it will be eventually removed from the placement decisions; 3) If a ManagedCluster (not selected previously) starts to match the selector, it will either be selected or at least has a chance to be selected (when NumberOfClusters is specified); - type: object - properties: - claimSelector: - description: ClaimSelector represents a selector of ManagedClusters by clusterClaims in status - type: object - properties: - matchExpressions: - description: matchExpressions is a list of cluster claim selector requirements. The requirements are ANDed. - type: array - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - type: object - required: - - key - - operator - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - type: array - items: - type: string - labelSelector: - description: LabelSelector represents a selector of ManagedClusters by label - type: object - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - type: array - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
- type: object - required: - - key - - operator - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - type: array - items: - type: string - matchLabels: - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - additionalProperties: - type: string - prioritizerPolicy: - description: PrioritizerPolicy defines the policy of the prioritizers. If this field is unset, then default prioritizer mode and configurations are used. Referring to PrioritizerPolicy to see more description about Mode and Configurations. - type: object - properties: - configurations: - type: array - items: - description: PrioritizerConfig represents the configuration of prioritizer - type: object - properties: - name: - description: 'Name will be removed in v1beta1 and replaced by ScoreCoordinate.BuiltIn. If both Name and ScoreCoordinate.BuiltIn are defined, will use the value in ScoreCoordinate.BuiltIn. Name is the name of a prioritizer. Below are the valid names: 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable.' - type: string - scoreCoordinate: - description: ScoreCoordinate represents the configuration of the prioritizer and score source. 
- type: object - required: - - type - properties: - addOn: - description: When type is "AddOn", AddOn defines the resource name and score name. - type: object - required: - - resourceName - - scoreName - properties: - resourceName: - description: ResourceName defines the resource name of the AddOnPlacementScore. The placement prioritizer selects AddOnPlacementScore CR by this name. - type: string - scoreName: - description: ScoreName defines the score name inside AddOnPlacementScore. AddOnPlacementScore contains a list of score name and score value, ScoreName specify the score to be used by the prioritizer. - type: string - builtIn: - description: 'BuiltIn defines the name of a BuiltIn prioritizer. Below are the valid BuiltIn prioritizer names. 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable.' - type: string - type: - description: Type defines the type of the prioritizer score. Type is either "BuiltIn", "AddOn" or "", where "" is "BuiltIn" by default. When the type is "BuiltIn", need to specify a BuiltIn prioritizer name in BuiltIn. When the type is "AddOn", need to configure the score source in AddOn. - type: string - default: BuiltIn - enum: - - BuiltIn - - AddOn - weight: - description: Weight defines the weight of the prioritizer score. The value must be ranged in [-10,10]. Each prioritizer will calculate an integer score of a cluster in the range of [-100, 100]. The final score of a cluster will be sum(weight * prioritizer_score). A higher weight indicates that the prioritizer weights more in the cluster selection, while 0 weight indicates that the prioritizer is disabled. A negative weight indicates wants to select the last ones. - type: integer - format: int32 - default: 1 - maximum: 10 - minimum: -10 - mode: - description: Mode is either Exact, Additive, "" where "" is Additive by default. 
In Additive mode, any prioritizer not explicitly enumerated is enabled in its default Configurations, in which Steady and Balance prioritizers have the weight of 1 while other prioritizers have the weight of 0. Additive doesn't require configuring all prioritizers. The default Configurations may change in the future, and additional prioritization will happen. In Exact mode, any prioritizer not explicitly enumerated is weighted as zero. Exact requires knowing the full set of prioritizers you want, but avoids behavior changes between releases. - type: string - default: Additive - tolerations: - description: Tolerations are applied to placements, and allow (but do not require) the managed clusters with certain taints to be selected by placements with matching tolerations. - type: array - items: - description: Toleration represents the toleration object that can be attached to a placement. The placement this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - type: object - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSelect, PreferNoSelect and NoSelectIfNew. - type: string - enum: - - NoSelect - - PreferNoSelect - - NoSelectIfNew - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a placement can tolerate all taints of a particular category. 
- type: string - default: Equal - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoSelect/PreferNoSelect, otherwise this field is ignored) tolerates the taint. The default value is nil, which indicates it tolerates the taint forever. The start time of counting the TolerationSeconds should be the TimeAdded in Taint, not the cluster scheduled time or TolerationSeconds added time. - type: integer - format: int64 - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - maxLength: 1024 - status: - description: Status represents the current status of the Placement - type: object - properties: - conditions: - description: Conditions contains the different condition status for this Placement. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
- type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - numberOfSelectedClusters: - description: NumberOfSelectedClusters represents the number of selected ManagedClusters - type: integer - format: int32 - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].status name: Succeeded diff --git a/controlplane/config/crds/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml b/controlplane/config/crds/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml index e6b8a0024..2a7353118 100644 --- a/controlplane/config/crds/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml +++ b/controlplane/config/crds/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml @@ -12,48 +12,6 @@ spec: scope: Namespaced preserveUnknownFields: false versions: - - name: v1alpha1 - deprecated: true - deprecationWarning: "cluster.open-cluster-management.io/v1alpha1 PlacementDecision is deprecated; use cluster.open-cluster-management.io/v1beta1 PlacementDecision" - schema: - openAPIV3Schema: - description: "PlacementDecision indicates a decision from a placement PlacementDecision should has a label cluster.open-cluster-management.io/placement={placement name} to reference a certain placement. \n If a placement has spec.numberOfClusters specified, the total number of decisions contained in status.decisions of PlacementDecisions should always be NumberOfClusters; otherwise, the total number of decisions should be the number of ManagedClusters which match the placement requirements. \n Some of the decisions might be empty when there are no enough ManagedClusters meet the placement requirements." 
- type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - status: - description: Status represents the current status of the PlacementDecision - type: object - required: - - decisions - properties: - decisions: - description: Decisions is a slice of decisions according to a placement The number of decisions should not be larger than 100 - type: array - items: - description: ClusterDecision represents a decision from a placement An empty ClusterDecision indicates it is not scheduled yet. - type: object - required: - - clusterName - - reason - properties: - clusterName: - description: ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all placement decisions for the Placement. - type: string - reason: - description: Reason represents the reason why the ManagedCluster is selected. 
- type: string - served: true - storage: false - subresources: - status: {} - name: v1beta1 schema: openAPIV3Schema: diff --git a/controlplane/config/crds/0001_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml b/controlplane/config/crds/0001_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml deleted file mode 100644 index e75e8c054..000000000 --- a/controlplane/config/crds/0001_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml +++ /dev/null @@ -1,83 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - name: appliedmanifestworks.work.open-cluster-management.io -spec: - group: work.open-cluster-management.io - names: - kind: AppliedManifestWork - listKind: AppliedManifestWorkList - plural: appliedmanifestworks - singular: appliedmanifestwork - scope: "Cluster" - preserveUnknownFields: false - subresources: - status: {} - validation: - openAPIV3Schema: - description: AppliedManifestWork represents an applied manifestwork on managed cluster that is placed on a managed cluster. An AppliedManifestWork links to a manifestwork on a hub recording resources deployed in the managed cluster. When the agent is removed from managed cluster, cluster-admin on managed cluster can delete appliedmanifestwork to remove resources deployed by the agent. The name of the appliedmanifestwork must be in the format of {hash of hub's first kube-apiserver url}-{manifestwork name} - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec represents the desired configuration of AppliedManifestWork. - type: object - properties: - hubHash: - description: HubHash represents the hash of the first hub kube apiserver to identify which hub this AppliedManifestWork links to. - type: string - manifestWorkName: - description: ManifestWorkName represents the name of the related manifestwork on the hub. - type: string - status: - description: Status represents the current status of AppliedManifestWork. - type: object - properties: - appliedResources: - description: AppliedResources represents a list of resources defined within the manifestwork that are applied. Only resources with valid GroupVersionResource, namespace, and name are suitable. An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. The resource relating to the item will also be removed from managed cluster. The deleted resource may still be present until the finalizers for that resource are finished. However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved. - type: array - items: - description: AppliedManifestResourceMeta represents the group, version, resource, name and namespace of a resource. Since these resources have been created, they must have valid group, version, resource, namespace, and name. - type: object - required: - - name - - resource - - version - properties: - group: - description: Group is the API Group of the Kubernetes resource, empty string indicates it is in core group. - type: string - name: - description: Name is the name of the Kubernetes resource. 
- type: string - namespace: - description: Name is the namespace of the Kubernetes resource, empty string indicates it is a cluster scoped resource. - type: string - resource: - description: Resource is the resource name of the Kubernetes resource. - type: string - uid: - description: UID is set on successful deletion of the Kubernetes resource by controller. The resource might be still visible on the managed cluster after this field is set. It is not directly settable by a client. - type: string - version: - description: Version is the version of the Kubernetes resource. - type: string - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/config/crds/bootstrap.go b/controlplane/config/crds/bootstrap.go index d1df1b3c0..59911556c 100644 --- a/controlplane/config/crds/bootstrap.go +++ b/controlplane/config/crds/bootstrap.go @@ -37,13 +37,12 @@ var baseCRD = []string{ // manifest work "0000_00_work.open-cluster-management.io_manifestworks.crd.yaml", // addon + "0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", "0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml", - // cluster claim - "0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml", + "0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml", // placement & placementdecision "0000_02_clusters.open-cluster-management.io_placements.crd.yaml", "0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml", - // placement score "0000_05_clusters.open-cluster-management.io_addonplacementscores.crd.yaml", } diff --git a/controlplane/go.mod b/controlplane/go.mod index 828eea263..a5e9c5c36 100644 --- a/controlplane/go.mod +++ b/controlplane/go.mod @@ -17,9 +17,11 @@ require ( k8s.io/kube-aggregator v0.24.3 k8s.io/kubernetes v1.24.0 k8s.io/utils v0.0.0-20220713171938-56c0de1e6f5e - 
open-cluster-management.io/api v0.8.0 + open-cluster-management.io/api v0.9.0 open-cluster-management.io/clusteradm v0.4.0 - open-cluster-management.io/registration v0.7.0 + open-cluster-management.io/placement v0.9.0 + open-cluster-management.io/registration v0.9.1-0.20221104092257-2e1c039a8e17 + sigs.k8s.io/controller-runtime v0.12.3 ) require ( @@ -67,7 +69,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gnostic v0.6.9 // indirect github.com/google/go-cmp v0.5.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect @@ -79,7 +81,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -128,10 +130,10 @@ require ( go.etcd.io/etcd/raft/v3 v3.5.4 // indirect go.opencensus.io v0.23.0 // indirect go.starlark.net v0.0.0-20220302181546-5411bad688d1 // indirect - go.uber.org/goleak v1.1.12 // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect gonum.org/v1/gonum v0.6.2 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect @@ -166,7 +168,7 @@ require ( google.golang.org/api v0.92.0 // indirect google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc // indirect google.golang.org/grpc v1.49.0 // 
indirect - k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 + k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 ) require ( @@ -246,6 +248,7 @@ replace ( k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.0 k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.0 k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.0 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.0 k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.0 k8s.io/kubectl => k8s.io/kubectl v0.24.0 @@ -256,6 +259,3 @@ replace ( k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.0 k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.0 ) - -// TODO(ycyaoxdu): modify registration, and maybe we can import registration directly? -replace open-cluster-management.io/registration => github.com/ycyaoxdu/registration v0.6.1-0.20220825022246-bc0bfd1dc8f9 diff --git a/controlplane/go.sum b/controlplane/go.sum index 192f98544..9474baef0 100644 --- a/controlplane/go.sum +++ b/controlplane/go.sum @@ -143,6 +143,7 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -251,6 +252,7 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= @@ -262,6 +264,7 @@ github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwo github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -379,8 +382,9 @@ github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMS github.com/google/cel-go v0.11.2 h1:o16cOggWWtH1a3ZHQ8uWqt8nd255vDrEK1mDE1cFRSQ= github.com/google/cel-go v0.11.2/go.mod h1:drz+knCRsctDZ180KZHwIEEUb9IdK/nxPoyhxi+O1K0= github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= 
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -437,7 +441,6 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= @@ -487,12 +490,13 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod 
h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -736,7 +740,6 @@ github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -777,14 +780,15 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xeipuuv/gojsonpointer 
v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/ycyaoxdu/registration v0.6.1-0.20220825022246-bc0bfd1dc8f9 h1:TpqHxHv68xZ+Nk2VU4l2hdMvNAqjbTDCkNI88/zRWfM= -github.com/ycyaoxdu/registration v0.6.1-0.20220825022246-bc0bfd1dc8f9/go.mod h1:aFM83oe4EmmG+ot8Q1rlP2nmfUuEOUAFk1BCPPrIekQ= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -859,7 +863,6 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod 
h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -981,6 +984,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1230,6 +1234,8 @@ golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= @@ -1467,6 +1473,7 @@ gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1505,7 +1512,6 @@ k8s.io/controller-manager v0.24.0/go.mod h1:ageMNQZc7cNH0FF1oarm7wZs6XyJj/V82nNV k8s.io/cri-api v0.24.0/go.mod h1:t3tImFtGeStN+ES69bQUX9sFg67ek38BM9YIJhMmuig= k8s.io/csi-translation-lib v0.24.0 h1:U56SfLSjpaSkrbR0PdEZXOAKbUKDQP80KV/LwFbix/g= k8s.io/csi-translation-lib v0.24.0/go.mod h1:jJaC3a1tI3IShByiAQmOOCl5PKpiZ51Vh70c9Eg2msM= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -1519,7 +1525,6 @@ k8s.io/kube-aggregator v0.24.0 h1:ax2B6v5y+sLISgal5COnlDRKOSr97uXpwif6nnK3a/M= k8s.io/kube-aggregator v0.24.0/go.mod h1:ftfs6Fi46z3cKzeF2kvNBPLbMlSKuqZbesJGNp/cQnw= k8s.io/kube-controller-manager v0.24.0 h1:A0IR8f3ShseLkEa0prJ+teRs/nBhRsC7GUb3yk6dG2o= k8s.io/kube-controller-manager v0.24.0/go.mod h1:s0pbwI8UuBEDdXQbTUpQdNIyU4rQ7jOxaXAcRBoWpJQ= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod 
h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kube-proxy v0.24.0/go.mod h1:OZ1k9jSwW94Rmj5hepCFea7qlGvvU+bfcosc6+dcFKA= @@ -1549,10 +1554,14 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -open-cluster-management.io/api v0.8.0 h1:hQLNyvvdx0G0iNxq80RWp93epNsUtsqJdLmGbXiYG5o= -open-cluster-management.io/api v0.8.0/go.mod h1:+OEARSAl2jIhuLItUcS30UgLA3khmA9ihygLVxzEn+U= +open-cluster-management.io/api v0.9.0 h1:JQidCTMY3RdS0m3UDP24j6ykNpxWJ20LEdNtbxJApxo= +open-cluster-management.io/api v0.9.0/go.mod h1:+OEARSAl2jIhuLItUcS30UgLA3khmA9ihygLVxzEn+U= open-cluster-management.io/clusteradm v0.4.0 h1:UqTgXy7o/Tps1tJNNHbkjVRCFP+4cvn79w7tkoHEvAA= open-cluster-management.io/clusteradm v0.4.0/go.mod h1:nodp4RjHy/94guw4eLiYiZzpmk1D5ExXnZogGSHygoA= +open-cluster-management.io/placement v0.9.0 h1:pLt8iUboxDdL++i0fcrJu/gm0pvdV5vIeJFJSddcs3w= +open-cluster-management.io/placement v0.9.0/go.mod h1:SK7KuFptb3rj2che6FKjyjWj0w4VyDS1atHQcZVnX6U= +open-cluster-management.io/registration v0.9.1-0.20221104092257-2e1c039a8e17 h1:3TGSnpa0pXd/XJh9HUe6s+DvOFI8oHEckiJ9AYcX2JA= +open-cluster-management.io/registration v0.9.1-0.20221104092257-2e1c039a8e17/go.mod h1:SjNENYFNsvCo5J0KeXqur7c+/EqEps1w5oaZUW+2GoI= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= @@ -1560,6 +1569,7 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 h1:dUk62HQ3ZFhD48Qr8MIXCiKA8wInBQCtuE4QGfFW7yA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod 
h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= +sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= diff --git a/controlplane/hack/crd-update/copy-crds.sh b/controlplane/hack/crd-update/copy-crds.sh new file mode 100755 index 000000000..f57609d92 --- /dev/null +++ b/controlplane/hack/crd-update/copy-crds.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +source "$(dirname "${BASH_SOURCE}")/init.sh" + +for f in $HUB_CRD_FILES +do + cp $f ./config/crds/ +done + diff --git a/controlplane/hack/crd-update/init.sh b/controlplane/hack/crd-update/init.sh new file mode 100644 index 000000000..e63d26d44 --- /dev/null +++ b/controlplane/hack/crd-update/init.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +HUB_CRD_FILES="./vendor/open-cluster-management.io/api/cluster/v1/*.crd.yaml +./vendor/open-cluster-management.io/api/addon/v1alpha1/*.crd.yaml +./vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml +./vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml +./vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml +./vendor/open-cluster-management.io/api/cluster/v1beta1/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml +./vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_05_clusters.open-cluster-management.io_addonplacementscores.crd.yaml 
+./vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml +" diff --git a/controlplane/pkg/apiserver/kubeapiserver/aggregator.go b/controlplane/pkg/apiserver/kubeapiserver/aggregator.go index e1fe8e232..62c808d66 100644 --- a/controlplane/pkg/apiserver/kubeapiserver/aggregator.go +++ b/controlplane/pkg/apiserver/kubeapiserver/aggregator.go @@ -230,17 +230,36 @@ func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delega return nil, err } - // Add PostStartHook to install ocm controllers + // Add PostStartHook to install registration controllers err = aggregatorServer.GenericAPIServer.AddPostStartHook("ocm-controlplane-registration-controllers", func(context genericapiserver.PostStartHookContext) error { // Start controllers controllerConfig := rest.CopyConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig) controllerConfig.ContentType = "application/json" go func() { - if err := ocmcontroller.InstallOCMHubControllers(goContext(context), controllerConfig); err != nil { - klog.Errorf("failed to bootstrap ocm hub controllers: %v", err) + if err := ocmcontroller.InstallRegistraionControllers(goContext(context), controllerConfig); err != nil { + klog.Errorf("failed to bootstrap ocm registration controllers: %v", err) } else { - klog.Infof("Finished bootstrapping ocm controllers") + klog.Infof("Finished bootstrapping ocm registration controllers") + } + }() + return nil + }) + if err != nil { + return nil, err + } + + // Add PostStartHook to install placement controllers + err = aggregatorServer.GenericAPIServer.AddPostStartHook("ocm-controlplane-placement-controllers", func(context genericapiserver.PostStartHookContext) error { + // Start controllers + controllerConfig := rest.CopyConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig) + controllerConfig.ContentType = "application/json" + + go func() { + if err := ocmcontroller.InstallPlacementControllers(goContext(context), 
controllerConfig); err != nil { + klog.Errorf("failed to bootstrap ocm placement controllers: %v", err) + } else { + klog.Infof("Finished bootstrapping ocm placement controllers") } }() return nil diff --git a/controlplane/pkg/controllers/ocmcontroller/ocmcontroller.go b/controlplane/pkg/controllers/ocmcontroller/ocmcontroller.go index be0d24f0e..46dde6dc3 100644 --- a/controlplane/pkg/controllers/ocmcontroller/ocmcontroller.go +++ b/controlplane/pkg/controllers/ocmcontroller/ocmcontroller.go @@ -8,13 +8,14 @@ import ( "github.com/openshift/library-go/pkg/operator/events" "k8s.io/client-go/rest" - "open-cluster-management.io/registration/pkg/hub" + placement "open-cluster-management.io/placement/pkg/controllers" + registration "open-cluster-management.io/registration/pkg/hub" confighub "open-cluster-management.io/ocm-controlplane/config/hub" ) // TODO(ycyaoxdu): add placement controllers -func InstallOCMHubControllers(ctx context.Context, kubeConfig *rest.Config) error { +func InstallRegistraionControllers(ctx context.Context, kubeConfig *rest.Config) error { eventRecorder := events.NewInMemoryRecorder("registration-controller") controllerContext := &controllercmd.ControllerContext{ @@ -23,7 +24,21 @@ func InstallOCMHubControllers(ctx context.Context, kubeConfig *rest.Config) erro OperatorNamespace: confighub.HubNameSpace, } - hub.RunControllerManager(ctx, controllerContext) + registration.RunControllerManager(ctx, controllerContext) + + return nil +} + +func InstallPlacementControllers(ctx context.Context, kubeConfig *rest.Config) error { + eventRecorder := events.NewInMemoryRecorder("placement-controller") + + controllerContext := &controllercmd.ControllerContext{ + KubeConfig: kubeConfig, + EventRecorder: eventRecorder, + OperatorNamespace: confighub.HubNameSpace, + } + + placement.RunControllerManager(ctx, controllerContext) return nil } diff --git a/controlplane/plugin/admission/managedclustermutating/admission.go 
b/controlplane/plugin/admission/managedclustermutating/admission.go index 46875ce92..d71589588 100644 --- a/controlplane/plugin/admission/managedclustermutating/admission.go +++ b/controlplane/plugin/admission/managedclustermutating/admission.go @@ -12,7 +12,8 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" "k8s.io/apiserver/pkg/admission/plugin/webhook/request" clusterv1api "open-cluster-management.io/api/cluster/v1" - clusterwebhook "open-cluster-management.io/registration/pkg/webhook/cluster" + clusterwebhookv1 "open-cluster-management.io/registration/pkg/webhook/v1" + runtimeadmission "sigs.k8s.io/controller-runtime/pkg/webhook/admission" admissionutil "open-cluster-management.io/ocm-controlplane/plugin/admission/util" ) @@ -27,6 +28,7 @@ func Register(plugins *admission.Plugins) { type Plugin struct { *admission.Handler + webhook *clusterwebhookv1.ManagedClusterWebhook } func (p *Plugin) ValidateInitialization() error { @@ -39,12 +41,11 @@ var _ admission.InitializationValidator = &Plugin{} func NewPlugin() *Plugin { return &Plugin{ Handler: admission.NewHandler(admission.Create, admission.Update), + webhook: &clusterwebhookv1.ManagedClusterWebhook{}, } } func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { - var mcm clusterwebhook.ManagedClusterMutatingAdmissionHook - v := generic.VersionedAttributes{ Attributes: a, VersionedOldObject: a.GetOldObject(), @@ -67,28 +68,21 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission. 
} uid := types.UID(uuid.NewUUID()) - ar := request.CreateV1beta1AdmissionReview(uid, &v, &i) - - obj := a.GetObject() - raw := runtime.RawExtension{} - err := admissionutil.Convert_runtime_Object_To_runtime_RawExtension_Raw(&obj, &raw) - if err != nil { - return fmt.Errorf("error occured in ManagedClusterMutating: failed to convert Object to RawExtension.") - } - ar.Request.Object = raw + ar := request.CreateV1AdmissionReview(uid, &v, &i) old := a.GetOldObject() oldRaw := runtime.RawExtension{} - err = admissionutil.Convert_runtime_Object_To_runtime_RawExtension_Raw(&old, &oldRaw) + err := admissionutil.Convert_runtime_Object_To_runtime_RawExtension_Raw(&old, &oldRaw) if err != nil { return fmt.Errorf("error occured in ManagedClusterMutating: failed to convert Object to RawExtension.") } ar.Request.OldObject = oldRaw - ar.Response = mcm.Admit(ar.Request) - - if !ar.Response.Allowed { - return fmt.Errorf("error occured in ManagedClusterMutating: [%d] %s", ar.Response.Result.Code, ar.Response.Result.Message) + r := runtimeadmission.Request{AdmissionRequest: *ar.Request} + admissionContext := runtimeadmission.NewContextWithRequest(ctx, r) + if err := p.webhook.Default(admissionContext, a.GetObject()); err != nil { + return err } + return nil } diff --git a/controlplane/plugin/admission/managedclustersetbindingvalidating/admission.go b/controlplane/plugin/admission/managedclustersetbindingvalidating/admission.go index ec824d736..34646e4d2 100644 --- a/controlplane/plugin/admission/managedclustersetbindingvalidating/admission.go +++ b/controlplane/plugin/admission/managedclustersetbindingvalidating/admission.go @@ -5,7 +5,6 @@ import ( "fmt" "io" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apiserver/pkg/admission" @@ -14,9 +13,8 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/request" "k8s.io/client-go/kubernetes" clusterv1alpha1api "open-cluster-management.io/api/cluster/v1alpha1" - 
clustersetbindingwebhook "open-cluster-management.io/registration/pkg/webhook/clustersetbinding" - - admissionutil "open-cluster-management.io/ocm-controlplane/plugin/admission/util" + webhookv1beta2 "open-cluster-management.io/registration/pkg/webhook/v1beta2" + runtimeadmission "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) const PluginName = "ManagedClusterSetBindingValidating" @@ -29,16 +27,16 @@ func Register(plugins *admission.Plugins) { type Plugin struct { *admission.Handler - client kubernetes.Interface + webhook *webhookv1beta2.ManagedClusterSetBindingWebhook } func (p *Plugin) SetExternalKubeClientSet(client kubernetes.Interface) { - p.client = client + p.webhook.SetExternalKubeClientSet(client) } func (p *Plugin) ValidateInitialization() error { - if p.client == nil { - return fmt.Errorf("missing client") + if p.webhook == nil { + return fmt.Errorf("missing admission") } return nil } @@ -50,13 +48,11 @@ var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&Plugin{}) func NewPlugin() *Plugin { return &Plugin{ Handler: admission.NewHandler(admission.Create, admission.Update), + webhook: &webhookv1beta2.ManagedClusterSetBindingWebhook{}, } } func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { - var mcsbv = clustersetbindingwebhook.ManagedClusterSetBindingValidatingAdmissionHook{} - mcsbv.SetKubeClient(p.client) - v := generic.VersionedAttributes{ Attributes: a, VersionedOldObject: a.GetOldObject(), @@ -79,28 +75,16 @@ func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi } uid := types.UID(uuid.NewUUID()) - ar := request.CreateV1beta1AdmissionReview(uid, &v, &i) - - obj := a.GetObject() - raw := runtime.RawExtension{} - err := admissionutil.Convert_runtime_Object_To_runtime_RawExtension_Raw(&obj, &raw) - if err != nil { - return fmt.Errorf("error occured in ManagedClusterMutating: failed to convert Object to RawExtension") + ar := 
request.CreateV1AdmissionReview(uid, &v, &i) + + r := runtimeadmission.Request{AdmissionRequest: *ar.Request} + admissionContext := runtimeadmission.NewContextWithRequest(ctx, r) + switch a.GetOperation() { + case admission.Create: + return p.webhook.ValidateCreate(admissionContext, a.GetObject()) + case admission.Update: + return p.webhook.ValidateUpdate(admissionContext, a.GetOldObject(), a.GetObject()) } - ar.Request.Object = raw - old := a.GetOldObject() - oldRaw := runtime.RawExtension{} - err = admissionutil.Convert_runtime_Object_To_runtime_RawExtension_Raw(&old, &oldRaw) - if err != nil { - return fmt.Errorf("error occured in ManagedClusterMutating: failed to convert Object to RawExtension") - } - ar.Request.OldObject = oldRaw - - res := mcsbv.Validate(ar.Request) - - if !res.Allowed { - return fmt.Errorf("error occured in ManagedClusterSetBindingValidating: [%d] %s", res.Result.Code, res.Result.Message) - } return nil } diff --git a/controlplane/plugin/admission/managedclustervalidating/admission.go b/controlplane/plugin/admission/managedclustervalidating/admission.go index 9c0323407..06a1923a3 100644 --- a/controlplane/plugin/admission/managedclustervalidating/admission.go +++ b/controlplane/plugin/admission/managedclustervalidating/admission.go @@ -5,7 +5,6 @@ import ( "fmt" "io" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apiserver/pkg/admission" @@ -14,9 +13,8 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/request" "k8s.io/client-go/kubernetes" clusterv1api "open-cluster-management.io/api/cluster/v1" - clusterwebhook "open-cluster-management.io/registration/pkg/webhook/cluster" - - admissionutil "open-cluster-management.io/ocm-controlplane/plugin/admission/util" + clusterwebhookv1 "open-cluster-management.io/registration/pkg/webhook/v1" + runtimeadmission "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) const PluginName = "ManagedClusterValidating" @@ -29,16 +27,16 
@@ func Register(plugins *admission.Plugins) { type Plugin struct { *admission.Handler - client kubernetes.Interface + webhook *clusterwebhookv1.ManagedClusterWebhook } func (p *Plugin) SetExternalKubeClientSet(client kubernetes.Interface) { - p.client = client + p.webhook.SetExternalKubeClientSet(client) } func (p *Plugin) ValidateInitialization() error { - if p.client == nil { - return fmt.Errorf("missing client") + if p.webhook == nil { + return fmt.Errorf("missing webhook") } return nil } @@ -50,13 +48,11 @@ var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&Plugin{}) func NewPlugin() *Plugin { return &Plugin{ Handler: admission.NewHandler(admission.Create, admission.Update), + webhook: &clusterwebhookv1.ManagedClusterWebhook{}, } } func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { - var mcv = clusterwebhook.ManagedClusterValidatingAdmissionHook{} - mcv.SetKubeClient(p.client) - v := generic.VersionedAttributes{ Attributes: a, VersionedOldObject: a.GetOldObject(), @@ -79,28 +75,15 @@ func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi } uid := types.UID(uuid.NewUUID()) - ar := request.CreateV1beta1AdmissionReview(uid, &v, &i) - - obj := a.GetObject() - raw := runtime.RawExtension{} - err := admissionutil.Convert_runtime_Object_To_runtime_RawExtension_Raw(&obj, &raw) - if err != nil { - return fmt.Errorf("error occured in ManagedClusterMutating: failed to convert Object to RawExtension.") - } - ar.Request.Object = raw - - old := a.GetOldObject() - oldRaw := runtime.RawExtension{} - err = admissionutil.Convert_runtime_Object_To_runtime_RawExtension_Raw(&old, &oldRaw) - if err != nil { - return fmt.Errorf("error occured in ManagedClusterMutating: failed to convert Object to RawExtension.") - } - ar.Request.OldObject = oldRaw - - res := mcv.Validate(ar.Request) - - if !res.Allowed { - return fmt.Errorf("error occured in ManagedClusterValidating: [%d] %s", 
res.Result.Code, res.Result.Message) + ar := request.CreateV1AdmissionReview(uid, &v, &i) + + r := runtimeadmission.Request{AdmissionRequest: *ar.Request} + admissionContext := runtimeadmission.NewContextWithRequest(ctx, r) + switch a.GetOperation() { + case admission.Create: + return p.webhook.ValidateCreate(admissionContext, a.GetObject()) + case admission.Update: + return p.webhook.ValidateUpdate(admissionContext, a.GetOldObject(), a.GetObject()) } return nil diff --git a/controlplane/vendor/github.com/google/gnostic/jsonschema/display.go b/controlplane/vendor/github.com/google/gnostic/jsonschema/display.go index 028a760a9..8677ed49a 100644 --- a/controlplane/vendor/github.com/google/gnostic/jsonschema/display.go +++ b/controlplane/vendor/github.com/google/gnostic/jsonschema/display.go @@ -46,8 +46,23 @@ func (schema *Schema) describeSchema(indent string) string { if schema.Schema != nil { result += indent + "$schema: " + *(schema.Schema) + "\n" } + if schema.ReadOnly != nil && *schema.ReadOnly { + result += indent + fmt.Sprintf("readOnly: %+v\n", *(schema.ReadOnly)) + } + if schema.WriteOnly != nil && *schema.WriteOnly { + result += indent + fmt.Sprintf("writeOnly: %+v\n", *(schema.WriteOnly)) + } if schema.ID != nil { - result += indent + "id: " + *(schema.ID) + "\n" + switch strings.TrimSuffix(*schema.Schema, "#") { + case "http://json-schema.org/draft-04/schema#": + fallthrough + case "#": + fallthrough + case "": + result += indent + "id: " + *(schema.ID) + "\n" + default: + result += indent + "$id: " + *(schema.ID) + "\n" + } } if schema.MultipleOf != nil { result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) diff --git a/controlplane/vendor/github.com/google/gnostic/jsonschema/models.go b/controlplane/vendor/github.com/google/gnostic/jsonschema/models.go index 4781bdc5f..0d877249a 100644 --- a/controlplane/vendor/github.com/google/gnostic/jsonschema/models.go +++ b/controlplane/vendor/github.com/google/gnostic/jsonschema/models.go @@ 
-23,9 +23,11 @@ import "gopkg.in/yaml.v3" // All fields are pointers and are nil if the associated values // are not specified. type Schema struct { - Schema *string // $schema - ID *string // id keyword used for $ref resolution scope - Ref *string // $ref, i.e. JSON Pointers + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + ReadOnly *bool + WriteOnly *bool // http://json-schema.org/latest/json-schema-validation.html // 5.1. Validation keywords for numeric instances (number and integer) diff --git a/controlplane/vendor/github.com/google/gnostic/jsonschema/reader.go b/controlplane/vendor/github.com/google/gnostic/jsonschema/reader.go index b8583d466..a909a3412 100644 --- a/controlplane/vendor/github.com/google/gnostic/jsonschema/reader.go +++ b/controlplane/vendor/github.com/google/gnostic/jsonschema/reader.go @@ -165,7 +165,6 @@ func NewSchemaFromObject(jsonData *yaml.Node) *Schema { default: fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) - return nil } return nil diff --git a/controlplane/vendor/github.com/google/gnostic/jsonschema/writer.go b/controlplane/vendor/github.com/google/gnostic/jsonschema/writer.go index 340dc5f93..15b1f9050 100644 --- a/controlplane/vendor/github.com/google/gnostic/jsonschema/writer.go +++ b/controlplane/vendor/github.com/google/gnostic/jsonschema/writer.go @@ -16,6 +16,7 @@ package jsonschema import ( "fmt" + "strings" "gopkg.in/yaml.v3" ) @@ -33,7 +34,11 @@ func renderMappingNode(node *yaml.Node, indent string) (result string) { value := node.Content[i+1] switch value.Kind { case yaml.ScalarNode: - result += "\"" + value.Value + "\"" + if value.Tag == "!!bool" { + result += value.Value + } else { + result += "\"" + value.Value + "\"" + } case yaml.MappingNode: result += renderMappingNode(value, innerIndent) case yaml.SequenceNode: @@ -58,7 +63,11 @@ func renderSequenceNode(node *yaml.Node, indent string) (result string) { item := 
node.Content[i] switch item.Kind { case yaml.ScalarNode: - result += innerIndent + "\"" + item.Value + "\"" + if item.Tag == "!!bool" { + result += innerIndent + item.Value + } else { + result += innerIndent + "\"" + item.Value + "\"" + } case yaml.MappingNode: result += innerIndent + renderMappingNode(item, innerIndent) + "" default: @@ -260,11 +269,26 @@ func (schema *Schema) nodeValue() *yaml.Node { content = appendPair(content, "title", nodeForString(*schema.Title)) } if schema.ID != nil { - content = appendPair(content, "id", nodeForString(*schema.ID)) + switch strings.TrimSuffix(*schema.Schema, "#") { + case "http://json-schema.org/draft-04/schema": + fallthrough + case "#": + fallthrough + case "": + content = appendPair(content, "id", nodeForString(*schema.ID)) + default: + content = appendPair(content, "$id", nodeForString(*schema.ID)) + } } if schema.Schema != nil { content = appendPair(content, "$schema", nodeForString(*schema.Schema)) } + if schema.ReadOnly != nil && *schema.ReadOnly { + content = appendPair(content, "readOnly", nodeForBoolean(*schema.ReadOnly)) + } + if schema.WriteOnly != nil && *schema.WriteOnly { + content = appendPair(content, "writeOnly", nodeForBoolean(*schema.WriteOnly)) + } if schema.Type != nil { content = appendPair(content, "type", schema.Type.nodeValue()) } diff --git a/controlplane/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/controlplane/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go index 0f1790766..28c2777d5 100644 --- a/controlplane/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go +++ b/controlplane/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go @@ -7887,7 +7887,12 @@ func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { if m == nil { return info } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = 
append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) + } + } return info } diff --git a/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go index 5f4a7025e..d54a84db7 100644 --- a/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ b/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go @@ -8560,7 +8560,12 @@ func (m *Strings) ToRawInfo() *yaml.Node { if m == nil { return info } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) + } + } return info } diff --git a/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go index 499e7f932..90a56f552 100644 --- a/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go +++ b/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: openapiv3/OpenAPIv3.proto package openapi_v3 @@ -6760,12 +6760,13 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, + 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto index 1be335b89..7aede5ed9 100644 --- a/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto +++ b/controlplane/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v3"; option objc_class_prefix = "OAS"; // The Go package name. 
-option go_package = "./openapiv3;openapi_v3"; +option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; message AdditionalPropertiesItem { oneof oneof { diff --git a/controlplane/vendor/github.com/google/gnostic/openapiv3/README.md b/controlplane/vendor/github.com/google/gnostic/openapiv3/README.md index 5ee12d92e..83603b82a 100644 --- a/controlplane/vendor/github.com/google/gnostic/openapiv3/README.md +++ b/controlplane/vendor/github.com/google/gnostic/openapiv3/README.md @@ -19,3 +19,7 @@ for OpenAPI. The schema-generator directory contains support code which generates openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). + +### How to rebuild + +`protoc -I=. -I=third_party --go_out=. --go_opt=paths=source_relative openapiv3/*.proto` \ No newline at end of file diff --git a/controlplane/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go b/controlplane/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go new file mode 100644 index 000000000..ae242f304 --- /dev/null +++ b/controlplane/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go @@ -0,0 +1,183 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.0 +// protoc v3.19.4 +// source: openapiv3/annotations.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Document)(nil), + Field: 1143, + Name: "openapi.v3.document", + Tag: "bytes,1143,opt,name=document", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1143, + Name: "openapi.v3.operation", + Tag: "bytes,1143,opt,name=operation", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.schema", + Tag: "bytes,1143,opt,name=schema", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.property", + Tag: "bytes,1143,opt,name=property", + Filename: "openapiv3/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional openapi.v3.Document document = 1143; + E_Document = &file_openapiv3_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // optional openapi.v3.Operation operation = 1143; + E_Operation = &file_openapiv3_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. 
+var ( + // optional openapi.v3.Schema schema = 1143; + E_Schema = &file_openapiv3_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional openapi.v3.Schema property = 1143; + E_Property = &file_openapiv3_annotations_proto_extTypes[3] +) + +var File_openapiv3_annotations_proto protoreflect.FileDescriptor + +var file_openapiv3_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 
0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, + 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_openapiv3_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: 
google.protobuf.FieldOptions + (*Document)(nil), // 4: openapi.v3.Document + (*Operation)(nil), // 5: openapi.v3.Operation + (*Schema)(nil), // 6: openapi.v3.Schema +} +var file_openapiv3_annotations_proto_depIdxs = []int32{ + 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions + 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions + 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions + 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions + 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document + 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation + 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema + 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 4, // [4:8] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_openapiv3_annotations_proto_init() } +func file_openapiv3_annotations_proto_init() { + if File_openapiv3_annotations_proto != nil { + return + } + file_openapiv3_OpenAPIv3_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_openapiv3_annotations_proto_goTypes, + DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, + ExtensionInfos: file_openapiv3_annotations_proto_extTypes, + }.Build() + File_openapiv3_annotations_proto = out.File + file_openapiv3_annotations_proto_rawDesc = nil + file_openapiv3_annotations_proto_goTypes = nil + file_openapiv3_annotations_proto_depIdxs = nil +} diff --git a/controlplane/vendor/github.com/google/gnostic/openapiv3/annotations.proto 
b/controlplane/vendor/github.com/google/gnostic/openapiv3/annotations.proto new file mode 100644 index 000000000..0bd87810d --- /dev/null +++ b/controlplane/vendor/github.com/google/gnostic/openapiv3/annotations.proto @@ -0,0 +1,60 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package openapi.v3; + +import "openapiv3/OpenAPIv3.proto"; +import "google/protobuf/descriptor.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "AnnotationsProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. 
Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +// The Go package name. +option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; + +extend google.protobuf.FileOptions { + Document document = 1143; +} + +extend google.protobuf.MethodOptions { + Operation operation = 1143; +} + +extend google.protobuf.MessageOptions { + Schema schema = 1143; +} + +extend google.protobuf.FieldOptions { + Schema property = 1143; +} \ No newline at end of file diff --git a/controlplane/vendor/github.com/imdario/mergo/README.md b/controlplane/vendor/github.com/imdario/mergo/README.md index aa8cbd7ce..7e6f7aeee 100644 --- a/controlplane/vendor/github.com/imdario/mergo/README.md +++ b/controlplane/vendor/github.com/imdario/mergo/README.md @@ -8,8 +8,7 @@ [![Coverage Status][9]][10] [![Sourcegraph][11]][12] [![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] +[![Become my sponsor][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -25,8 +24,8 @@ [12]: https://sourcegraph.com/github.com/imdario/mergo?badge [13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. 
[It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. @@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) Donate using Liberapay +Become my sponsor ### Mergo in the wild +- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) @@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) ## Install @@ -168,7 +169,7 @@ func main() { Note: if test are failing due missing package, please execute: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 ### Transformers @@ -218,7 +219,6 @@ func main() { } ``` - ## Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) @@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). 
-## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
diff --git a/controlplane/vendor/github.com/imdario/mergo/merge.go b/controlplane/vendor/github.com/imdario/mergo/merge.go index 8c2a8fcd9..8b4e2f47a 100644 --- a/controlplane/vendor/github.com/imdario/mergo/merge.go +++ b/controlplane/vendor/github.com/imdario/mergo/merge.go @@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co visited[h] = &visit{addr, typ, seen} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return diff --git a/controlplane/vendor/github.com/imdario/mergo/mergo.go b/controlplane/vendor/github.com/imdario/mergo/mergo.go index 3cc926c7f..9fe362d47 100644 --- a/controlplane/vendor/github.com/imdario/mergo/mergo.go +++ b/controlplane/vendor/github.com/imdario/mergo/mergo.go @@ -17,7 +17,7 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ErrNonPointerAgument = errors.New("dst must be a pointer") @@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } diff --git a/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go new file mode 
100644 index 000000000..f4c92913a --- /dev/null +++ b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go @@ -0,0 +1,40 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package collectors provides implementations of prometheus.Collector to +// conveniently collect process and Go-related metrics. +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. 
See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewBuildInfoCollector() +} diff --git a/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go new file mode 100644 index 000000000..d5a7279fb --- /dev/null +++ b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go @@ -0,0 +1,119 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +type dbStatsCollector struct { + db *sql.DB + + maxOpenConnections *prometheus.Desc + + openConnections *prometheus.Desc + inUseConnections *prometheus.Desc + idleConnections *prometheus.Desc + + waitCount *prometheus.Desc + waitDuration *prometheus.Desc + maxIdleClosed *prometheus.Desc + maxIdleTimeClosed *prometheus.Desc + maxLifetimeClosed *prometheus.Desc +} + +// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. +// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. 
+func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { + fqName := func(name string) string { + return "go_sql_" + name + } + return &dbStatsCollector{ + db: db, + maxOpenConnections: prometheus.NewDesc( + fqName("max_open_connections"), + "Maximum number of open connections to the database.", + nil, prometheus.Labels{"db_name": dbName}, + ), + openConnections: prometheus.NewDesc( + fqName("open_connections"), + "The number of established connections both in use and idle.", + nil, prometheus.Labels{"db_name": dbName}, + ), + inUseConnections: prometheus.NewDesc( + fqName("in_use_connections"), + "The number of connections currently in use.", + nil, prometheus.Labels{"db_name": dbName}, + ), + idleConnections: prometheus.NewDesc( + fqName("idle_connections"), + "The number of idle connections.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitCount: prometheus.NewDesc( + fqName("wait_count_total"), + "The total number of connections waited for.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitDuration: prometheus.NewDesc( + fqName("wait_duration_seconds_total"), + "The total time blocked waiting for a new connection.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleClosed: prometheus.NewDesc( + fqName("max_idle_closed_total"), + "The total number of connections closed due to SetMaxIdleConns.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleTimeClosed: prometheus.NewDesc( + fqName("max_idle_time_closed_total"), + "The total number of connections closed due to SetConnMaxIdleTime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxLifetimeClosed: prometheus.NewDesc( + fqName("max_lifetime_closed_total"), + "The total number of connections closed due to SetConnMaxLifetime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + } +} + +// Describe implements Collector. 
+func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.maxOpenConnections + ch <- c.openConnections + ch <- c.inUseConnections + ch <- c.idleConnections + ch <- c.waitCount + ch <- c.waitDuration + ch <- c.maxIdleClosed + ch <- c.maxLifetimeClosed + ch <- c.maxIdleTimeClosed +} + +// Collect implements Collector. +func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { + stats := c.db.Stats() + ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) + ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) + ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) + ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) + ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) + ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) + ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) + ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) + ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) +} diff --git a/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go new file mode 100644 index 000000000..3aa8d0590 --- /dev/null +++ b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -0,0 +1,57 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewExpvarCollector returns a newly allocated expvar Collector. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. 
The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewExpvarCollector(exports) +} diff --git a/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go new file mode 100644 index 000000000..effc57840 --- /dev/null +++ b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go @@ -0,0 +1,49 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.17 +// +build !go1.17 + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. 
This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. +// +// NOTE: The problem is solved in Go 1.15, see +// https://github.com/golang/go/issues/19812 for the related Go issue. 
+func NewGoCollector() prometheus.Collector { + return prometheus.NewGoCollector() +} diff --git a/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go new file mode 100644 index 000000000..246c5ea94 --- /dev/null +++ b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -0,0 +1,160 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.17 +// +build go1.17 + +package collectors + +import ( + "regexp" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/internal" +) + +var ( + // MetricsAll allows all the metrics to be collected from Go runtime. + MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")} + // MetricsGC allows only GC metrics to be collected from Go runtime. + // e.g. go_gc_cycles_automatic_gc_cycles_total + MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)} + // MetricsMemory allows only memory metrics to be collected from Go runtime. + // e.g. go_memory_classes_heap_free_bytes + MetricsMemory = GoRuntimeMetricsRule{regexp.MustCompile(`^/memory/.*`)} + // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. + // e.g. 
go_sched_goroutines_goroutines + MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} +) + +// WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: +// +// go_memstats_alloc_bytes +// go_memstats_alloc_bytes_total +// go_memstats_sys_bytes +// go_memstats_lookups_total +// go_memstats_mallocs_total +// go_memstats_frees_total +// go_memstats_heap_alloc_bytes +// go_memstats_heap_sys_bytes +// go_memstats_heap_idle_bytes +// go_memstats_heap_inuse_bytes +// go_memstats_heap_released_bytes +// go_memstats_heap_objects +// go_memstats_stack_inuse_bytes +// go_memstats_stack_sys_bytes +// go_memstats_mspan_inuse_bytes +// go_memstats_mspan_sys_bytes +// go_memstats_mcache_inuse_bytes +// go_memstats_mcache_sys_bytes +// go_memstats_buck_hash_sys_bytes +// go_memstats_gc_sys_bytes +// go_memstats_other_sys_bytes +// go_memstats_next_gc_bytes +// +// so the metrics known from pre client_golang v1.12.0, +// +// NOTE(bwplotka): The above represents runtime.MemStats statistics, but they are +// actually implemented using new runtime/metrics package. (except skipped go_memstats_gc_cpu_fraction +// -- see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 for explanation). +// +// Some users might want to disable this on collector level (although you can use scrape relabelling on Prometheus), +// because similar metrics can be now obtained using WithGoCollectorRuntimeMetrics. Note that the semantics of new +// metrics might be different, plus the names can be change over time with different Go version. +// +// NOTE(bwplotka): Changing metric names can be tedious at times as the alerts, recording rules and dashboards have to be adjusted. +// The old metrics are also very useful, with many guides and books written about how to interpret them. 
+// +// As a result our recommendation would be to stick with MemStats like metrics and enable other runtime/metrics if you are interested +// in advanced insights Go provides. See ExampleGoCollector_WithAdvancedGoMetrics. +func WithGoCollectorMemStatsMetricsDisabled() func(options *internal.GoCollectorOptions) { + return func(o *internal.GoCollectorOptions) { + o.DisableMemStatsLikeMetrics = true + } +} + +// GoRuntimeMetricsRule allow enabling and configuring particular group of runtime/metrics. +// TODO(bwplotka): Consider adding ability to adjust buckets. +type GoRuntimeMetricsRule struct { + // Matcher represents RE2 expression will match the runtime/metrics from https://golang.bg/src/runtime/metrics/description.go + // Use `regexp.MustCompile` or `regexp.Compile` to create this field. + Matcher *regexp.Regexp +} + +// WithGoCollectorRuntimeMetrics allows enabling and configuring particular group of runtime/metrics. +// See the list of metrics https://golang.bg/src/runtime/metrics/description.go (pick the Go version you use there!). +// You can use this option in repeated manner, which will add new rules. The order of rules is important, the last rule +// that matches particular metrics is applied. +func WithGoCollectorRuntimeMetrics(rules ...GoRuntimeMetricsRule) func(options *internal.GoCollectorOptions) { + rs := make([]internal.GoCollectorRule, len(rules)) + for i, r := range rules { + rs[i] = internal.GoCollectorRule{ + Matcher: r.Matcher, + } + } + + return func(o *internal.GoCollectorOptions) { + o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...) + } +} + +// WithoutGoCollectorRuntimeMetrics allows disabling group of runtime/metrics that you might have added in WithGoCollectorRuntimeMetrics. +// It behaves similarly to WithGoCollectorRuntimeMetrics just with deny-list semantics. 
+func WithoutGoCollectorRuntimeMetrics(matchers ...*regexp.Regexp) func(options *internal.GoCollectorOptions) { + rs := make([]internal.GoCollectorRule, len(matchers)) + for i, m := range matchers { + rs[i] = internal.GoCollectorRule{ + Matcher: m, + Deny: true, + } + } + + return func(o *internal.GoCollectorOptions) { + o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...) + } +} + +// GoCollectionOption represents Go collection option flag. +// Deprecated. +type GoCollectionOption uint32 + +const ( + // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure. + // Deprecated. Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. + GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota + // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package. + // Deprecated. Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) + // function to enable those metrics in the collector. + GoRuntimeMetricsCollection +) + +// WithGoCollections allows enabling different collections for Go collector on top of base metrics. +// Deprecated. Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. +func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) { + return func(options *internal.GoCollectorOptions) { + if flags&GoRuntimeMemStatsCollection == 0 { + WithGoCollectorMemStatsMetricsDisabled()(options) + } + + if flags&GoRuntimeMetricsCollection != 0 { + WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})(options) + } + } +} + +// NewGoCollector returns a collector that exports metrics about the current Go +// process using debug.GCStats (base metrics) and runtime/metrics (both in MemStats style and new ones). 
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewGoCollector(opts...) +} diff --git a/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go new file mode 100644 index 000000000..24558f50a --- /dev/null +++ b/controlplane/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go @@ -0,0 +1,56 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). 
Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ + PidFn: opts.PidFn, + Namespace: opts.Namespace, + ReportErrors: opts.ReportErrors, + }) +} diff --git a/controlplane/vendor/gomodules.xyz/jsonpatch/v2/LICENSE b/controlplane/vendor/gomodules.xyz/jsonpatch/v2/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/controlplane/vendor/gomodules.xyz/jsonpatch/v2/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/controlplane/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/controlplane/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go new file mode 100644 index 000000000..0ffb31560 --- /dev/null +++ b/controlplane/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -0,0 +1,334 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +var errBadJSONDoc = fmt.Errorf("invalid JSON Document") + +type JsonPatchOperation = Operation + +type Operation struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` +} + +func (j *Operation) Json() string { + b, _ := json.Marshal(j) + return string(b) +} + +func (j *Operation) MarshalJSON() ([]byte, error) { + var b bytes.Buffer + b.WriteString("{") + b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation)) + b.WriteString(fmt.Sprintf(`,"path":"%s"`, j.Path)) + // Consider omitting Value for non-nullable operations. + if j.Value != nil || j.Operation == "replace" || j.Operation == "add" { + v, err := json.Marshal(j.Value) + if err != nil { + return nil, err + } + b.WriteString(`,"value":`) + b.Write(v) + } + b.WriteString("}") + return b.Bytes(), nil +} + +type ByPath []Operation + +func (a ByPath) Len() int { return len(a) } +func (a ByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path } + +func NewOperation(op, path string, value interface{}) Operation { + return Operation{Operation: op, Path: path, Value: value} +} + +// CreatePatch creates a patch as specified in http://jsonpatch.com/ +// +// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. +// The function will return an array of JsonPatchOperations +// +// An error will be returned if any of the two documents are invalid. 
+func CreatePatch(a, b []byte) ([]Operation, error) { + var aI interface{} + var bI interface{} + err := json.Unmarshal(a, &aI) + if err != nil { + return nil, errBadJSONDoc + } + err = json.Unmarshal(b, &bI) + if err != nil { + return nil, errBadJSONDoc + } + return handleValues(aI, bI, "", []Operation{}) +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt, ok := bv.(string) + if ok && bt == at { + return true + } + case float64: + bt, ok := bv.(float64) + if ok && bt == at { + return true + } + case bool: + bt, ok := bv.(bool) + if ok && bt == at { + return true + } + case map[string]interface{}: + bt, ok := bv.(map[string]interface{}) + if !ok { + return false + } + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt, ok := bv.([]interface{}) + if !ok { + return false + } + if len(bt) != len(at) { + return false + } + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + } + return false +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. 
+// TODO decode support: +// var rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") + +var rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") + +func makePath(path string, newPart interface{}) string { + key := rfc6901Encoder.Replace(fmt.Sprintf("%v", newPart)) + if path == "" { + return "/" + key + } + if strings.HasSuffix(path, "/") { + return path + key + } + return path + "/" + key +} + +// diff returns the (recursive) difference between a and b as an array of JsonPatchOperations. +func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operation, error) { + for key, bv := range b { + p := makePath(path, key) + av, ok := a[key] + // value was added + if !ok { + patch = append(patch, NewOperation("add", p, bv)) + continue + } + // Types are the same, compare values + var err error + patch, err = handleValues(av, bv, p, patch) + if err != nil { + return nil, err + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + p := makePath(path, key) + + patch = append(patch, NewOperation("remove", p, nil)) + } + } + return patch, nil +} + +func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, error) { + { + at := reflect.TypeOf(av) + bt := reflect.TypeOf(bv) + if at == nil && bt == nil { + // do nothing + return patch, nil + } else if at != bt { + // If types have changed, replace completely (preserves null in destination) + return append(patch, NewOperation("replace", p, bv)), nil + } + } + + var err error + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + patch, err = diff(at, bt, p, patch) + if err != nil { + return nil, err + } + case string, float64, bool: + if !matchesValue(av, bv) { + patch = append(patch, NewOperation("replace", p, bv)) + } + case []interface{}: + bt := bv.([]interface{}) + if isSimpleArray(at) && isSimpleArray(bt) { + patch = append(patch, compareEditDistance(at, bt, p)...) 
+ } else { + n := min(len(at), len(bt)) + for i := len(at) - 1; i >= n; i-- { + patch = append(patch, NewOperation("remove", makePath(p, i), nil)) + } + for i := n; i < len(bt); i++ { + patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) + } + for i := 0; i < n; i++ { + var err error + patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) + if err != nil { + return nil, err + } + } + } + default: + panic(fmt.Sprintf("Unknown type:%T ", av)) + } + return patch, nil +} + +func isBasicType(a interface{}) bool { + switch a.(type) { + case string, float64, bool: + default: + return false + } + return true +} + +func isSimpleArray(a []interface{}) bool { + for i := range a { + switch a[i].(type) { + case string, float64, bool: + default: + val := reflect.ValueOf(a[i]) + if val.Kind() == reflect.Map { + for _, k := range val.MapKeys() { + av := val.MapIndex(k) + if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface { + if av.IsNil() { + continue + } + av = av.Elem() + } + if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool { + return false + } + } + return true + } + return false + } + } + return true +} + +// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm +// Adapted from https://github.com/texttheater/golang-levenshtein +func compareEditDistance(s, t []interface{}, p string) []Operation { + m := len(s) + n := len(t) + + d := make([][]int, m+1) + for i := 0; i <= m; i++ { + d[i] = make([]int, n+1) + d[i][0] = i + } + for j := 0; j <= n; j++ { + d[0][j] = j + } + + for j := 1; j <= n; j++ { + for i := 1; i <= m; i++ { + if reflect.DeepEqual(s[i-1], t[j-1]) { + d[i][j] = d[i-1][j-1] // no op required + } else { + del := d[i-1][j] + 1 + add := d[i][j-1] + 1 + rep := d[i-1][j-1] + 1 + d[i][j] = min(rep, min(add, del)) + } + } + } + + return backtrace(s, t, p, m, n, d) +} + +func min(x int, y int) int { + if y < x { + return y + } + return x +} + +func backtrace(s, t []interface{}, p 
string, i int, j int, matrix [][]int) []Operation { + if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { + op := NewOperation("remove", makePath(p, i-1), nil) + return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) + } + if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { + op := NewOperation("add", makePath(p, i), t[j-1]) + return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) + } + if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { + if isBasicType(s[0]) { + op := NewOperation("replace", makePath(p, i-1), t[j-1]) + return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) + } + + p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{}) + return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) + } + if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { + return backtrace(s, t, p, i-1, j-1, matrix) + } + return []Operation{} +} diff --git a/controlplane/vendor/modules.txt b/controlplane/vendor/modules.txt index b98b1e126..5fc637a65 100644 --- a/controlplane/vendor/modules.txt +++ b/controlplane/vendor/modules.txt @@ -248,7 +248,7 @@ github.com/google/cel-go/interpreter github.com/google/cel-go/interpreter/functions github.com/google/cel-go/parser github.com/google/cel-go/parser/gen -# github.com/google/gnostic v0.5.7-v3refs +# github.com/google/gnostic v0.6.9 ## explicit; go 1.12 github.com/google/gnostic/compiler github.com/google/gnostic/extensions @@ -333,7 +333,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities -# github.com/imdario/mergo v0.3.12 +# github.com/imdario/mergo v0.3.13 ## explicit; go 1.13 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.1 @@ -511,6 +511,7 @@ github.com/pquerna/cachecontrol/cacheobject # github.com/prometheus/client_golang v1.13.0 ## explicit; go 1.17 github.com/prometheus/client_golang/prometheus 
+github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil @@ -799,8 +800,6 @@ go.starlark.net/syntax # go.uber.org/atomic v1.10.0 ## explicit; go 1.18 go.uber.org/atomic -# go.uber.org/goleak v1.1.12 -## explicit; go 1.13 # go.uber.org/multierr v1.8.0 ## explicit; go 1.14 go.uber.org/multierr @@ -883,6 +882,9 @@ golang.org/x/time/rate # golang.org/x/tools v0.1.12 ## explicit; go 1.18 golang.org/x/tools/container/intsets +# gomodules.xyz/jsonpatch/v2 v2.2.0 +## explicit; go 1.12 +gomodules.xyz/jsonpatch/v2 # gonum.org/v1/gonum v0.6.2 ## explicit; go 1.12 gonum.org/v1/gonum/blas @@ -1803,7 +1805,7 @@ k8s.io/kube-aggregator/pkg/registry/apiservice/rest # k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.0 ## explicit; go 1.16 k8s.io/kube-controller-manager/config/v1alpha1 -# k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 +# k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 => k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 ## explicit; go 1.16 k8s.io/kube-openapi/pkg/aggregator k8s.io/kube-openapi/pkg/builder @@ -2366,7 +2368,7 @@ k8s.io/utils/pointer k8s.io/utils/strings k8s.io/utils/strings/slices k8s.io/utils/trace -# open-cluster-management.io/api v0.8.0 +# open-cluster-management.io/api v0.9.0 ## explicit; go 1.18 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned @@ -2382,15 +2384,18 @@ open-cluster-management.io/api/client/cluster/clientset/versioned/scheme open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1 open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1 open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1 
+open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2 open-cluster-management.io/api/client/cluster/informers/externalversions open-cluster-management.io/api/client/cluster/informers/externalversions/cluster open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1 open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1 open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1 +open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2 open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces open-cluster-management.io/api/client/cluster/listers/cluster/v1 open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1 open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1 +open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2 open-cluster-management.io/api/client/work/clientset/versioned open-cluster-management.io/api/client/work/clientset/versioned/scheme open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1 @@ -2402,14 +2407,28 @@ open-cluster-management.io/api/client/work/listers/work/v1 open-cluster-management.io/api/cluster/v1 open-cluster-management.io/api/cluster/v1alpha1 open-cluster-management.io/api/cluster/v1beta1 +open-cluster-management.io/api/cluster/v1beta2 open-cluster-management.io/api/feature open-cluster-management.io/api/work/v1 # open-cluster-management.io/clusteradm v0.4.0 ## explicit; go 1.17 open-cluster-management.io/clusteradm/pkg/config open-cluster-management.io/clusteradm/pkg/helpers -# open-cluster-management.io/registration v0.7.0 => github.com/ycyaoxdu/registration v0.6.1-0.20220825022246-bc0bfd1dc8f9 +# open-cluster-management.io/placement v0.9.0 ## explicit; go 1.18 +open-cluster-management.io/placement/pkg/controllers +open-cluster-management.io/placement/pkg/controllers/framework 
+open-cluster-management.io/placement/pkg/controllers/scheduling +open-cluster-management.io/placement/pkg/debugger +open-cluster-management.io/placement/pkg/plugins +open-cluster-management.io/placement/pkg/plugins/addon +open-cluster-management.io/placement/pkg/plugins/balance +open-cluster-management.io/placement/pkg/plugins/predicate +open-cluster-management.io/placement/pkg/plugins/resource +open-cluster-management.io/placement/pkg/plugins/steady +open-cluster-management.io/placement/pkg/plugins/tainttoleration +# open-cluster-management.io/registration v0.9.1-0.20221104092257-2e1c039a8e17 +## explicit; go 1.19 open-cluster-management.io/registration/pkg/features open-cluster-management.io/registration/pkg/helpers open-cluster-management.io/registration/pkg/hub @@ -2423,12 +2442,56 @@ open-cluster-management.io/registration/pkg/hub/managedclustersetbinding open-cluster-management.io/registration/pkg/hub/rbacfinalizerdeletion open-cluster-management.io/registration/pkg/hub/taint open-cluster-management.io/registration/pkg/hub/user -open-cluster-management.io/registration/pkg/webhook/cluster -open-cluster-management.io/registration/pkg/webhook/clustersetbinding +open-cluster-management.io/registration/pkg/webhook/v1 +open-cluster-management.io/registration/pkg/webhook/v1beta1 +open-cluster-management.io/registration/pkg/webhook/v1beta2 # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 ## explicit; go 1.17 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client +# sigs.k8s.io/controller-runtime v0.12.3 +## explicit; go 1.17 +sigs.k8s.io/controller-runtime +sigs.k8s.io/controller-runtime/pkg/builder +sigs.k8s.io/controller-runtime/pkg/cache +sigs.k8s.io/controller-runtime/pkg/cache/internal +sigs.k8s.io/controller-runtime/pkg/certwatcher +sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics +sigs.k8s.io/controller-runtime/pkg/client 
+sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/client/config +sigs.k8s.io/controller-runtime/pkg/cluster +sigs.k8s.io/controller-runtime/pkg/config +sigs.k8s.io/controller-runtime/pkg/config/v1alpha1 +sigs.k8s.io/controller-runtime/pkg/controller +sigs.k8s.io/controller-runtime/pkg/controller/controllerutil +sigs.k8s.io/controller-runtime/pkg/conversion +sigs.k8s.io/controller-runtime/pkg/event +sigs.k8s.io/controller-runtime/pkg/handler +sigs.k8s.io/controller-runtime/pkg/healthz +sigs.k8s.io/controller-runtime/pkg/internal/controller +sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics +sigs.k8s.io/controller-runtime/pkg/internal/httpserver +sigs.k8s.io/controller-runtime/pkg/internal/log +sigs.k8s.io/controller-runtime/pkg/internal/objectutil +sigs.k8s.io/controller-runtime/pkg/internal/recorder +sigs.k8s.io/controller-runtime/pkg/leaderelection +sigs.k8s.io/controller-runtime/pkg/log +sigs.k8s.io/controller-runtime/pkg/manager +sigs.k8s.io/controller-runtime/pkg/manager/signals +sigs.k8s.io/controller-runtime/pkg/metrics +sigs.k8s.io/controller-runtime/pkg/predicate +sigs.k8s.io/controller-runtime/pkg/ratelimiter +sigs.k8s.io/controller-runtime/pkg/reconcile +sigs.k8s.io/controller-runtime/pkg/recorder +sigs.k8s.io/controller-runtime/pkg/runtime/inject +sigs.k8s.io/controller-runtime/pkg/scheme +sigs.k8s.io/controller-runtime/pkg/source +sigs.k8s.io/controller-runtime/pkg/source/internal +sigs.k8s.io/controller-runtime/pkg/webhook +sigs.k8s.io/controller-runtime/pkg/webhook/admission +sigs.k8s.io/controller-runtime/pkg/webhook/conversion +sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics # sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 ## explicit; go 1.17 sigs.k8s.io/json @@ -2559,6 +2622,7 @@ sigs.k8s.io/yaml # k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.0 # k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.0 # k8s.io/kube-controller-manager => 
k8s.io/kube-controller-manager v0.24.0 +# k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 # k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.0 # k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.0 # k8s.io/kubectl => k8s.io/kubectl v0.24.0 @@ -2568,4 +2632,3 @@ sigs.k8s.io/yaml # k8s.io/mount-utils => k8s.io/mount-utils v0.24.0 # k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.0 # k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.0 -# open-cluster-management.io/registration => github.com/ycyaoxdu/registration v0.6.1-0.20220825022246-bc0bfd1dc8f9 diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml index 6326a7b48..08bdb4056 100644 --- a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml @@ -38,7 +38,7 @@ spec: type: object properties: addOnConfiguration: - description: addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration. + description: 'Deprecated: Use supportedConfigs filed instead addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration.' 
type: object properties: crName: @@ -61,6 +61,35 @@ spec: displayName: description: displayName represents the name of add-on that will be displayed. type: string + supportedConfigs: + description: supportedConfigs is a list of configuration types supported by add-on. An empty list means the add-on does not require configurations. The default is an empty list + type: array + items: + description: ConfigMeta represents a collection of metadata information for add-on configuration. + type: object + required: + - resource + properties: + defaultConfig: + description: defaultConfig represents the namespace and name of the default add-on configuration. In scenario where all add-ons have a same configuration. + type: object + required: + - name + properties: + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + group: + description: group of the add-on configuration. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 status: description: status represents the current status of cluster management add-on. type: object diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml index 614048d1e..9d12081b7 100644 --- a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml @@ -42,9 +42,33 @@ spec: description: spec holds configuration that could apply to any operator. 
type: object properties: + configs: + description: configs is a list of add-on configurations. In scenario where the current add-on has its own configurations. An empty list means there are no defautl configurations for add-on. The default is an empty list + type: array + items: + type: object + required: + - name + - resource + properties: + group: + description: group of the add-on configuration. + type: string + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 installNamespace: description: installNamespace is the namespace on the managed cluster to install the addon agent. If it is not set, open-cluster-management-agent-addon namespace is used to install the addon agent. type: string + default: open-cluster-management-agent-addon maxLength: 63 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ status: @@ -52,7 +76,7 @@ spec: type: object properties: addOnConfiguration: - description: addOnConfiguration is a reference to configuration information for the add-on. This resource is use to locate the configuration resource for the add-on. + description: 'Deprecated: Use configReference instead addOnConfiguration is a reference to configuration information for the add-on. This resource is use to locate the configuration resource for the add-on.' type: object properties: crName: @@ -119,6 +143,34 @@ spec: type: string maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + configReferences: + description: configReferences is a list of current add-on configuration references. This will be overridden by the clustermanagementaddon configuration references. 
+ type: array + items: + description: ConfigReference is a reference to the current add-on configuration. This resource is used to locate the configuration resource for the current add-on. + type: object + required: + - name + - resource + properties: + group: + description: group of the add-on configuration. + type: string + lastObservedGeneration: + description: lastObservedGeneration is the observed generation of the add-on configuration. + type: integer + format: int64 + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 healthCheck: description: healthCheck indicates how to check the healthiness status of the current addon. It should be set by each addon implementation, by default, the lease mode will be used. 
type: object diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml new file mode 100644 index 000000000..0707f403c --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml @@ -0,0 +1,93 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: addondeploymentconfigs.addon.open-cluster-management.io +spec: + group: addon.open-cluster-management.io + names: + kind: AddOnDeploymentConfig + listKind: AddOnDeploymentConfigList + plural: addondeploymentconfigs + singular: addondeploymentconfig + scope: Namespaced + preserveUnknownFields: false + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: AddOnDeploymentConfig represents a deployment configuration for an add-on. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec represents a desired configuration for an add-on. 
+ type: object + properties: + customizedVariables: + description: CustomizedVariables is a list of name-value variables for the current add-on deployment. The add-on implementation can use these variables to render its add-on deployment. The default is an empty list. + type: array + items: + description: CustomizedVariable represents a customized variable for add-on deployment. + type: object + required: + - name + properties: + name: + description: Name of this variable. + type: string + maxLength: 255 + pattern: ^[a-zA-Z_][_a-zA-Z0-9]*$ + value: + description: Value of this variable. + type: string + maxLength: 1024 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodePlacement: + description: NodePlacement enables explicit control over the scheduling of the add-on agents on the managed cluster. All add-on agent pods are expected to comply with this node placement. If the placement is nil, the placement is not specified, it will be omitted. If the placement is an empty object, the placement will match all nodes and tolerate nothing. + type: object + properties: + nodeSelector: + description: NodeSelector defines which Nodes the Pods are scheduled on. If the selector is an empty list, it will match all nodes. The default is an empty list. + type: object + additionalProperties: + type: string + tolerations: + description: Tolerations is attached by pods to tolerate any taint that matches the triple using the matching operator . If the tolerations is an empty list, it will tolerate nothing. The default is an empty list. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/register.go b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/register.go index 6464d8be4..ac1f3383f 100644 --- a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/register.go +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/register.go @@ -34,6 +34,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ClusterManagementAddOnList{}, &ManagedClusterAddOn{}, &ManagedClusterAddOnList{}, + &AddOnDeploymentConfig{}, + &AddOnDeploymentConfigList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addondeploymentconfig.go b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addondeploymentconfig.go new file mode 100644 index 000000000..e0ab38487 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addondeploymentconfig.go @@ -0,0 +1,80 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AddOnDeploymentConfig represents a deployment configuration for an add-on. +type AddOnDeploymentConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec represents a desired configuration for an add-on. + // +required + Spec AddOnDeploymentConfigSpec `json:"spec"` +} + +type AddOnDeploymentConfigSpec struct { + // CustomizedVariables is a list of name-value variables for the current add-on deployment. + // The add-on implementation can use these variables to render its add-on deployment. + // The default is an empty list. 
+ // +optional + // +listType=map + // +listMapKey=name + CustomizedVariables []CustomizedVariable `json:"customizedVariables,omitempty"` + + // NodePlacement enables explicit control over the scheduling of the add-on agents on the + // managed cluster. + // All add-on agent pods are expected to comply with this node placement. + // If the placement is nil, the placement is not specified, it will be omitted. + // If the placement is an empty object, the placement will match all nodes and tolerate nothing. + // +optional + NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` +} + +// CustomizedVariable represents a customized variable for add-on deployment. +type CustomizedVariable struct { + // Name of this variable. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:Pattern=^[a-zA-Z_][_a-zA-Z0-9]*$ + Name string `json:"name"` + + // Value of this variable. + // +kubebuilder:validation:MaxLength=1024 + Value string `json:"value"` +} + +// NodePlacement describes node scheduling configuration for the pods. +type NodePlacement struct { + // NodeSelector defines which Nodes the Pods are scheduled on. + // If the selector is an empty list, it will match all nodes. + // The default is an empty list. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations is attached by pods to tolerate any taint that matches + // the triple using the matching operator . + // If the tolerations is an empty list, it will tolerate nothing. + // The default is an empty list. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// AddOnDeploymentConfigList is a collection of add-on deployment config. +type AddOnDeploymentConfigList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of add-on deployment config. + Items []AddOnDeploymentConfig `json:"items"` +} diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_clustermanagementaddon.go b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_clustermanagementaddon.go index 17ae0b6ae..9479a2de8 100644 --- a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_clustermanagementaddon.go +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_clustermanagementaddon.go @@ -35,24 +35,42 @@ type ClusterManagementAddOn struct { type ClusterManagementAddOnSpec struct { // addOnMeta is a reference to the metadata information for the add-on. // +optional - AddOnMeta AddOnMeta `json:"addOnMeta"` + AddOnMeta AddOnMeta `json:"addOnMeta,omitempty"` + // Deprecated: Use supportedConfigs filed instead // addOnConfiguration is a reference to configuration information for the add-on. - // In scenario where a multiple add-ons share the same add-on CRD, - // multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration. + // In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn + // resources need to be created and reference the same AddOnConfiguration. // +optional - AddOnConfiguration ConfigCoordinates `json:"addOnConfiguration"` + AddOnConfiguration ConfigCoordinates `json:"addOnConfiguration,omitempty"` + + // supportedConfigs is a list of configuration types supported by add-on. + // An empty list means the add-on does not require configurations. + // The default is an empty list + // +optional + SupportedConfigs []ConfigMeta `json:"supportedConfigs,omitempty"` } // AddOnMeta represents a collection of metadata information for the add-on. 
type AddOnMeta struct { // displayName represents the name of add-on that will be displayed. // +optional - DisplayName string `json:"displayName"` + DisplayName string `json:"displayName,omitempty"` // description represents the detailed description of the add-on. // +optional - Description string `json:"description"` + Description string `json:"description,omitempty"` +} + +// ConfigMeta represents a collection of metadata information for add-on configuration. +type ConfigMeta struct { + // group and resouce of the add-on configuration. + ConfigGroupResource `json:",inline"` + + // defaultConfig represents the namespace and name of the default add-on configuration. + // In scenario where all add-ons have a same configuration. + // +optional + DefaultConfig *ConfigReferent `json:"defaultConfig,omitempty"` } // ConfigCoordinates represents the information for locating the CRD and CR that configures the add-on. @@ -60,18 +78,44 @@ type ConfigCoordinates struct { // crdName is the name of the CRD used to configure instances of the managed add-on. // This field should be configured if the add-on have a CRD that controls the configuration of the add-on. // +optional - CRDName string `json:"crdName"` + CRDName string `json:"crdName,omitempty"` // crName is the name of the CR used to configure instances of the managed add-on. // This field should be configured if add-on CR have a consistent name across the all of the ManagedCluster instaces. // +optional - CRName string `json:"crName"` + CRName string `json:"crName,omitempty"` // lastObservedGeneration is the observed generation of the custom resource for the configuration of the addon. // +optional LastObservedGeneration int64 `json:"lastObservedGeneration,omitempty"` } +// ConfigGroupResource represents the GroupResource of the add-on configuration +type ConfigGroupResource struct { + // group of the add-on configuration. + Group string `json:"group"` + + // resource of the add-on configuration. 
+ // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength:=1 + Resource string `json:"resource"` +} + +// ConfigReferent represents the namespace and name for an add-on configuration. +type ConfigReferent struct { + // namespace of the add-on configuration. + // If this field is not set, the configuration is in the cluster scope. + // +optional + Namespace string `json:"namespace,omitempty"` + + // name of the add-on configuration. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength:=1 + Name string `json:"name"` +} + // ClusterManagementAddOnStatus represents the current status of cluster management add-on. type ClusterManagementAddOnStatus struct { } diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go index 7c20d159c..9fe2d16a0 100644 --- a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go @@ -35,9 +35,17 @@ type ManagedClusterAddOnSpec struct { // installNamespace is the namespace on the managed cluster to install the addon agent. // If it is not set, open-cluster-management-agent-addon namespace is used to install the addon agent. // +optional + // +kubebuilder:default=open-cluster-management-agent-addon // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ InstallNamespace string `json:"installNamespace,omitempty"` + + // configs is a list of add-on configurations. + // In scenario where the current add-on has its own configurations. + // An empty list means there are no defautl configurations for add-on. 
+ // The default is an empty list + // +optional + Configs []AddOnConfig `json:"configs,omitempty"` } // RegistrationConfig defines the configuration of the addon agent to register to hub. The Klusterlet agent will @@ -60,6 +68,14 @@ type RegistrationConfig struct { Subject Subject `json:"subject,omitempty"` } +type AddOnConfig struct { + // group and resource of add-on configuration. + ConfigGroupResource `json:",inline"` + + // name and namespace of add-on configuration. + ConfigReferent `json:",inline"` +} + // Subject is the user subject of the addon agent to be registered to the hub. type Subject struct { // user is the user name of the addon agent. @@ -94,12 +110,18 @@ type ManagedClusterAddOnStatus struct { // addOnMeta is a reference to the metadata information for the add-on. // This should be same as the addOnMeta for the corresponding ClusterManagementAddOn resource. // +optional - AddOnMeta AddOnMeta `json:"addOnMeta"` + AddOnMeta AddOnMeta `json:"addOnMeta,omitempty"` + // Deprecated: Use configReference instead // addOnConfiguration is a reference to configuration information for the add-on. // This resource is use to locate the configuration resource for the add-on. // +optional - AddOnConfiguration ConfigCoordinates `json:"addOnConfiguration"` + AddOnConfiguration ConfigCoordinates `json:"addOnConfiguration,omitempty"` + + // configReferences is a list of current add-on configuration references. + // This will be overridden by the clustermanagementaddon configuration references. + // +optional + ConfigReferences []ConfigReference `json:"configReferences,omitempty"` // registrations is the conifigurations for the addon agent to register to hub. It should be set by each addon controller // on hub to define how the addon agent on managedcluster is registered. With the registration defined, @@ -126,6 +148,9 @@ const ( // ManagedClusterAddOnConditionDegraded represents that the addon agent is providing degraded service on // the managed cluster. 
ManagedClusterAddOnConditionDegraded string = "Degraded" + + // ManagedClusterAddOnCondtionConfigured represents that the addon agent is configured with its configuration + ManagedClusterAddOnCondtionConfigured string = "Configured" ) // ObjectReference contains enough information to let you inspect or modify the referred object. @@ -147,6 +172,21 @@ type ObjectReference struct { Name string `json:"name"` } +// ConfigReference is a reference to the current add-on configuration. +// This resource is used to locate the configuration resource for the current add-on. +type ConfigReference struct { + // This field is synced from ClusterManagementAddOn configGroupResource field. + ConfigGroupResource `json:",inline"` + + // This field is synced from ClusterManagementAddOn defaultConfig and ManagedClusterAddOn config fields. + // If both of them are defined, the ManagedClusterAddOn configs will overwrite the ClusterManagementAddOn + // defaultConfigs. + ConfigReferent `json:",inline"` + + // lastObservedGeneration is the observed generation of the add-on configuration. + LastObservedGeneration int64 `json:"lastObservedGeneration"` +} + // HealthCheckMode indicates the mode for the addon to check its healthiness status // +kubebuilder:validation:Enum=Lease;Customized type HealthCheckMode string diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go index 0bd5ef90f..83b91d7b8 100644 --- a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go @@ -6,10 +6,115 @@ package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AddOnConfig) DeepCopyInto(out *AddOnConfig) { + *out = *in + out.ConfigGroupResource = in.ConfigGroupResource + out.ConfigReferent = in.ConfigReferent + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnConfig. +func (in *AddOnConfig) DeepCopy() *AddOnConfig { + if in == nil { + return nil + } + out := new(AddOnConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOnDeploymentConfig) DeepCopyInto(out *AddOnDeploymentConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnDeploymentConfig. +func (in *AddOnDeploymentConfig) DeepCopy() *AddOnDeploymentConfig { + if in == nil { + return nil + } + out := new(AddOnDeploymentConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddOnDeploymentConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOnDeploymentConfigList) DeepCopyInto(out *AddOnDeploymentConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AddOnDeploymentConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnDeploymentConfigList. 
+func (in *AddOnDeploymentConfigList) DeepCopy() *AddOnDeploymentConfigList { + if in == nil { + return nil + } + out := new(AddOnDeploymentConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddOnDeploymentConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOnDeploymentConfigSpec) DeepCopyInto(out *AddOnDeploymentConfigSpec) { + *out = *in + if in.CustomizedVariables != nil { + in, out := &in.CustomizedVariables, &out.CustomizedVariables + *out = make([]CustomizedVariable, len(*in)) + copy(*out, *in) + } + if in.NodePlacement != nil { + in, out := &in.NodePlacement, &out.NodePlacement + *out = new(NodePlacement) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnDeploymentConfigSpec. +func (in *AddOnDeploymentConfigSpec) DeepCopy() *AddOnDeploymentConfigSpec { + if in == nil { + return nil + } + out := new(AddOnDeploymentConfigSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AddOnMeta) DeepCopyInto(out *AddOnMeta) { *out = *in @@ -31,7 +136,7 @@ func (in *ClusterManagementAddOn) DeepCopyInto(out *ClusterManagementAddOn) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status return } @@ -92,6 +197,13 @@ func (in *ClusterManagementAddOnSpec) DeepCopyInto(out *ClusterManagementAddOnSp *out = *in out.AddOnMeta = in.AddOnMeta out.AddOnConfiguration = in.AddOnConfiguration + if in.SupportedConfigs != nil { + in, out := &in.SupportedConfigs, &out.SupportedConfigs + *out = make([]ConfigMeta, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -137,6 +249,94 @@ func (in *ConfigCoordinates) DeepCopy() *ConfigCoordinates { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigGroupResource) DeepCopyInto(out *ConfigGroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigGroupResource. +func (in *ConfigGroupResource) DeepCopy() *ConfigGroupResource { + if in == nil { + return nil + } + out := new(ConfigGroupResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMeta) DeepCopyInto(out *ConfigMeta) { + *out = *in + out.ConfigGroupResource = in.ConfigGroupResource + if in.DefaultConfig != nil { + in, out := &in.DefaultConfig, &out.DefaultConfig + *out = new(ConfigReferent) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMeta. 
+func (in *ConfigMeta) DeepCopy() *ConfigMeta { + if in == nil { + return nil + } + out := new(ConfigMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigReference) DeepCopyInto(out *ConfigReference) { + *out = *in + out.ConfigGroupResource = in.ConfigGroupResource + out.ConfigReferent = in.ConfigReferent + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigReference. +func (in *ConfigReference) DeepCopy() *ConfigReference { + if in == nil { + return nil + } + out := new(ConfigReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigReferent) DeepCopyInto(out *ConfigReferent) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigReferent. +func (in *ConfigReferent) DeepCopy() *ConfigReferent { + if in == nil { + return nil + } + out := new(ConfigReferent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizedVariable) DeepCopyInto(out *CustomizedVariable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedVariable. +func (in *CustomizedVariable) DeepCopy() *CustomizedVariable { + if in == nil { + return nil + } + out := new(CustomizedVariable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HealthCheck) DeepCopyInto(out *HealthCheck) { *out = *in @@ -158,7 +358,7 @@ func (in *ManagedClusterAddOn) DeepCopyInto(out *ManagedClusterAddOn) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -217,6 +417,11 @@ func (in *ManagedClusterAddOnList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedClusterAddOnSpec) DeepCopyInto(out *ManagedClusterAddOnSpec) { *out = *in + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make([]AddOnConfig, len(*in)) + copy(*out, *in) + } return } @@ -247,6 +452,11 @@ func (in *ManagedClusterAddOnStatus) DeepCopyInto(out *ManagedClusterAddOnStatus } out.AddOnMeta = in.AddOnMeta out.AddOnConfiguration = in.AddOnConfiguration + if in.ConfigReferences != nil { + in, out := &in.ConfigReferences, &out.ConfigReferences + *out = make([]ConfigReference, len(*in)) + copy(*out, *in) + } if in.Registrations != nil { in, out := &in.Registrations, &out.Registrations *out = make([]RegistrationConfig, len(*in)) @@ -268,6 +478,36 @@ func (in *ManagedClusterAddOnStatus) DeepCopy() *ManagedClusterAddOnStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement. +func (in *NodePlacement) DeepCopy() *NodePlacement { + if in == nil { + return nil + } + out := new(NodePlacement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { *out = *in diff --git a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.swagger_doc_generated.go b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.swagger_doc_generated.go index b8dbd2192..2278ea4b3 100644 --- a/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/controlplane/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.swagger_doc_generated.go @@ -11,6 +11,54 @@ package v1alpha1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AddOnDeploymentConfig = map[string]string{ + "": "AddOnDeploymentConfig represents a deployment configuration for an add-on.", + "spec": "spec represents a desired configuration for an add-on.", +} + +func (AddOnDeploymentConfig) SwaggerDoc() map[string]string { + return map_AddOnDeploymentConfig +} + +var map_AddOnDeploymentConfigList = map[string]string{ + "": "AddOnDeploymentConfigList is a collection of add-on deployment config.", + "metadata": 
"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of add-on deployment config.", +} + +func (AddOnDeploymentConfigList) SwaggerDoc() map[string]string { + return map_AddOnDeploymentConfigList +} + +var map_AddOnDeploymentConfigSpec = map[string]string{ + "customizedVariables": "CustomizedVariables is a list of name-value variables for the current add-on deployment. The add-on implementation can use these variables to render its add-on deployment. The default is an empty list.", + "nodePlacement": "NodePlacement enables explicit control over the scheduling of the add-on agents on the managed cluster. All add-on agent pods are expected to comply with this node placement. If the placement is nil, the placement is not specified, it will be omitted. If the placement is an empty object, the placement will match all nodes and tolerate nothing.", +} + +func (AddOnDeploymentConfigSpec) SwaggerDoc() map[string]string { + return map_AddOnDeploymentConfigSpec +} + +var map_CustomizedVariable = map[string]string{ + "": "CustomizedVariable represents a customized variable for add-on deployment.", + "name": "Name of this variable.", + "value": "Value of this variable.", +} + +func (CustomizedVariable) SwaggerDoc() map[string]string { + return map_CustomizedVariable +} + +var map_NodePlacement = map[string]string{ + "": "NodePlacement describes node scheduling configuration for the pods.", + "nodeSelector": "NodeSelector defines which Nodes the Pods are scheduled on. If the selector is an empty list, it will match all nodes. The default is an empty list.", + "tolerations": "Tolerations is attached by pods to tolerate any taint that matches the triple using the matching operator . If the tolerations is an empty list, it will tolerate nothing. 
The default is an empty list.", +} + +func (NodePlacement) SwaggerDoc() map[string]string { + return map_NodePlacement +} + var map_AddOnMeta = map[string]string{ "": "AddOnMeta represents a collection of metadata information for the add-on.", "displayName": "displayName represents the name of add-on that will be displayed.", @@ -44,7 +92,8 @@ func (ClusterManagementAddOnList) SwaggerDoc() map[string]string { var map_ClusterManagementAddOnSpec = map[string]string{ "": "ClusterManagementAddOnSpec provides information for the add-on.", "addOnMeta": "addOnMeta is a reference to the metadata information for the add-on.", - "addOnConfiguration": "addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration.", + "addOnConfiguration": "Deprecated: Use supportedConfigs filed instead addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration.", + "supportedConfigs": "supportedConfigs is a list of configuration types supported by add-on. An empty list means the add-on does not require configurations. 
The default is an empty list", } func (ClusterManagementAddOnSpec) SwaggerDoc() map[string]string { @@ -70,6 +119,44 @@ func (ConfigCoordinates) SwaggerDoc() map[string]string { return map_ConfigCoordinates } +var map_ConfigGroupResource = map[string]string{ + "": "ConfigGroupResource represents the GroupResource of the add-on configuration", + "group": "group of the add-on configuration.", + "resource": "resource of the add-on configuration.", +} + +func (ConfigGroupResource) SwaggerDoc() map[string]string { + return map_ConfigGroupResource +} + +var map_ConfigMeta = map[string]string{ + "": "ConfigMeta represents a collection of metadata information for add-on configuration.", + "defaultConfig": "defaultConfig represents the namespace and name of the default add-on configuration. In scenario where all add-ons have a same configuration.", +} + +func (ConfigMeta) SwaggerDoc() map[string]string { + return map_ConfigMeta +} + +var map_ConfigReferent = map[string]string{ + "": "ConfigReferent represents the namespace and name for an add-on configuration.", + "namespace": "namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope.", + "name": "name of the add-on configuration.", +} + +func (ConfigReferent) SwaggerDoc() map[string]string { + return map_ConfigReferent +} + +var map_ConfigReference = map[string]string{ + "": "ConfigReference is a reference to the current add-on configuration. 
This resource is used to locate the configuration resource for the current add-on.", + "lastObservedGeneration": "lastObservedGeneration is the observed generation of the add-on configuration.", +} + +func (ConfigReference) SwaggerDoc() map[string]string { + return map_ConfigReference +} + var map_HealthCheck = map[string]string{ "mode": "mode indicates which mode will be used to check the healthiness status of the addon.", } @@ -99,6 +186,7 @@ func (ManagedClusterAddOnList) SwaggerDoc() map[string]string { var map_ManagedClusterAddOnSpec = map[string]string{ "": "ManagedClusterAddOnSpec defines the install configuration of an addon agent on managed cluster.", "installNamespace": "installNamespace is the namespace on the managed cluster to install the addon agent. If it is not set, open-cluster-management-agent-addon namespace is used to install the addon agent.", + "configs": "configs is a list of add-on configurations. In scenario where the current add-on has its own configurations. An empty list means there are no defautl configurations for add-on. The default is an empty list", } func (ManagedClusterAddOnSpec) SwaggerDoc() map[string]string { @@ -110,7 +198,8 @@ var map_ManagedClusterAddOnStatus = map[string]string{ "conditions": "conditions describe the state of the managed and monitored components for the operator.", "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces 4. related ClusterManagementAddon resource", "addOnMeta": "addOnMeta is a reference to the metadata information for the add-on. This should be same as the addOnMeta for the corresponding ClusterManagementAddOn resource.", - "addOnConfiguration": "addOnConfiguration is a reference to configuration information for the add-on. 
This resource is use to locate the configuration resource for the add-on.", + "addOnConfiguration": "Deprecated: Use configReference instead addOnConfiguration is a reference to configuration information for the add-on. This resource is use to locate the configuration resource for the add-on.", + "configReferences": "configReferences is a list of current add-on configuration references. This will be overridden by the clustermanagementaddon configuration references.", "registrations": "registrations is the conifigurations for the addon agent to register to hub. It should be set by each addon controller on hub to define how the addon agent on managedcluster is registered. With the registration defined, The addon agent can access to kube apiserver with kube style API or other endpoints on hub cluster with client certificate authentication. A csr will be created per registration configuration. If more than one registrationConfig is defined, a csr will be created for each registration configuration. It is not allowed that multiple registrationConfigs have the same signer name. After the csr is approved on the hub cluster, the klusterlet agent will create a secret in the installNamespace for the registrationConfig. If the signerName is \"kubernetes.io/kube-apiserver-client\", the secret name will be \"{addon name}-hub-kubeconfig\" whose contents includes key/cert and kubeconfig. Otherwise, the secret name will be \"{addon name}-{signer name}-client-cert\" whose contents includes key/cert.", "healthCheck": "healthCheck indicates how to check the healthiness status of the current addon. 
It should be set by each addon implementation, by default, the lease mode will be used.", } diff --git a/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addon_client.go b/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addon_client.go index 895e7a4eb..175a2f4a2 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addon_client.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addon_client.go @@ -12,6 +12,7 @@ import ( type AddonV1alpha1Interface interface { RESTClient() rest.Interface + AddOnDeploymentConfigsGetter ClusterManagementAddOnsGetter ManagedClusterAddOnsGetter } @@ -21,6 +22,10 @@ type AddonV1alpha1Client struct { restClient rest.Interface } +func (c *AddonV1alpha1Client) AddOnDeploymentConfigs(namespace string) AddOnDeploymentConfigInterface { + return newAddOnDeploymentConfigs(c, namespace) +} + func (c *AddonV1alpha1Client) ClusterManagementAddOns() ClusterManagementAddOnInterface { return newClusterManagementAddOns(c) } diff --git a/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addondeploymentconfig.go b/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addondeploymentconfig.go new file mode 100644 index 000000000..e6888f16b --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addondeploymentconfig.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + scheme "open-cluster-management.io/api/client/addon/clientset/versioned/scheme" +) + +// AddOnDeploymentConfigsGetter has a method to return a AddOnDeploymentConfigInterface. +// A group's client should implement this interface. +type AddOnDeploymentConfigsGetter interface { + AddOnDeploymentConfigs(namespace string) AddOnDeploymentConfigInterface +} + +// AddOnDeploymentConfigInterface has methods to work with AddOnDeploymentConfig resources. +type AddOnDeploymentConfigInterface interface { + Create(ctx context.Context, addOnDeploymentConfig *v1alpha1.AddOnDeploymentConfig, opts v1.CreateOptions) (*v1alpha1.AddOnDeploymentConfig, error) + Update(ctx context.Context, addOnDeploymentConfig *v1alpha1.AddOnDeploymentConfig, opts v1.UpdateOptions) (*v1alpha1.AddOnDeploymentConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.AddOnDeploymentConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AddOnDeploymentConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AddOnDeploymentConfig, err error) + AddOnDeploymentConfigExpansion +} + +// addOnDeploymentConfigs implements AddOnDeploymentConfigInterface +type addOnDeploymentConfigs struct { + client rest.Interface + ns string +} + +// newAddOnDeploymentConfigs returns a AddOnDeploymentConfigs +func newAddOnDeploymentConfigs(c *AddonV1alpha1Client, namespace string) 
*addOnDeploymentConfigs { + return &addOnDeploymentConfigs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the addOnDeploymentConfig, and returns the corresponding addOnDeploymentConfig object, and an error if there is any. +func (c *addOnDeploymentConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AddOnDeploymentConfig, err error) { + result = &v1alpha1.AddOnDeploymentConfig{} + err = c.client.Get(). + Namespace(c.ns). + Resource("addondeploymentconfigs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of AddOnDeploymentConfigs that match those selectors. +func (c *addOnDeploymentConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AddOnDeploymentConfigList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.AddOnDeploymentConfigList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("addondeploymentconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested addOnDeploymentConfigs. +func (c *addOnDeploymentConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("addondeploymentconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a addOnDeploymentConfig and creates it. Returns the server's representation of the addOnDeploymentConfig, and an error, if there is any. 
+func (c *addOnDeploymentConfigs) Create(ctx context.Context, addOnDeploymentConfig *v1alpha1.AddOnDeploymentConfig, opts v1.CreateOptions) (result *v1alpha1.AddOnDeploymentConfig, err error) { + result = &v1alpha1.AddOnDeploymentConfig{} + err = c.client.Post(). + Namespace(c.ns). + Resource("addondeploymentconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(addOnDeploymentConfig). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a addOnDeploymentConfig and updates it. Returns the server's representation of the addOnDeploymentConfig, and an error, if there is any. +func (c *addOnDeploymentConfigs) Update(ctx context.Context, addOnDeploymentConfig *v1alpha1.AddOnDeploymentConfig, opts v1.UpdateOptions) (result *v1alpha1.AddOnDeploymentConfig, err error) { + result = &v1alpha1.AddOnDeploymentConfig{} + err = c.client.Put(). + Namespace(c.ns). + Resource("addondeploymentconfigs"). + Name(addOnDeploymentConfig.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(addOnDeploymentConfig). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the addOnDeploymentConfig and deletes it. Returns an error if one occurs. +func (c *addOnDeploymentConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("addondeploymentconfigs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *addOnDeploymentConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("addondeploymentconfigs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). 
+ Error() +} + +// Patch applies the patch and returns the patched addOnDeploymentConfig. +func (c *addOnDeploymentConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AddOnDeploymentConfig, err error) { + result = &v1alpha1.AddOnDeploymentConfig{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("addondeploymentconfigs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/generated_expansion.go b/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/generated_expansion.go index 06fa938e4..c3d0b0d49 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/generated_expansion.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/generated_expansion.go @@ -2,6 +2,8 @@ package v1alpha1 +type AddOnDeploymentConfigExpansion interface{} + type ClusterManagementAddOnExpansion interface{} type ManagedClusterAddOnExpansion interface{} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1/addondeploymentconfig.go b/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1/addondeploymentconfig.go new file mode 100644 index 000000000..1097b8914 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1/addondeploymentconfig.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + versioned "open-cluster-management.io/api/client/addon/clientset/versioned" + internalinterfaces "open-cluster-management.io/api/client/addon/informers/externalversions/internalinterfaces" + v1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" +) + +// AddOnDeploymentConfigInformer provides access to a shared informer and lister for +// AddOnDeploymentConfigs. +type AddOnDeploymentConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.AddOnDeploymentConfigLister +} + +type addOnDeploymentConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewAddOnDeploymentConfigInformer constructs a new informer for AddOnDeploymentConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewAddOnDeploymentConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAddOnDeploymentConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredAddOnDeploymentConfigInformer constructs a new informer for AddOnDeploymentConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredAddOnDeploymentConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AddonV1alpha1().AddOnDeploymentConfigs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Watch(context.TODO(), options) + }, + }, + &addonv1alpha1.AddOnDeploymentConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *addOnDeploymentConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAddOnDeploymentConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *addOnDeploymentConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&addonv1alpha1.AddOnDeploymentConfig{}, f.defaultInformer) +} + +func (f *addOnDeploymentConfigInformer) Lister() v1alpha1.AddOnDeploymentConfigLister { + return v1alpha1.NewAddOnDeploymentConfigLister(f.Informer().GetIndexer()) +} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1/interface.go b/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1/interface.go index e860bca85..efdacfb25 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1/interface.go +++ 
b/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1/interface.go @@ -8,6 +8,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // AddOnDeploymentConfigs returns a AddOnDeploymentConfigInformer. + AddOnDeploymentConfigs() AddOnDeploymentConfigInformer // ClusterManagementAddOns returns a ClusterManagementAddOnInformer. ClusterManagementAddOns() ClusterManagementAddOnInformer // ManagedClusterAddOns returns a ManagedClusterAddOnInformer. @@ -25,6 +27,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// AddOnDeploymentConfigs returns a AddOnDeploymentConfigInformer. +func (v *version) AddOnDeploymentConfigs() AddOnDeploymentConfigInformer { + return &addOnDeploymentConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // ClusterManagementAddOns returns a ClusterManagementAddOnInformer. 
func (v *version) ClusterManagementAddOns() ClusterManagementAddOnInformer { return &clusterManagementAddOnInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/generic.go b/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/generic.go index d77522057..c6a129e57 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/generic.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/addon/informers/externalversions/generic.go @@ -37,6 +37,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=addon.open-cluster-management.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("addondeploymentconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Addon().V1alpha1().AddOnDeploymentConfigs().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("clustermanagementaddons"): return &genericInformer{resource: resource.GroupResource(), informer: f.Addon().V1alpha1().ClusterManagementAddOns().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("managedclusteraddons"): diff --git a/controlplane/vendor/open-cluster-management.io/api/client/addon/listers/addon/v1alpha1/addondeploymentconfig.go b/controlplane/vendor/open-cluster-management.io/api/client/addon/listers/addon/v1alpha1/addondeploymentconfig.go new file mode 100644 index 000000000..e3d91d944 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/client/addon/listers/addon/v1alpha1/addondeploymentconfig.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +// AddOnDeploymentConfigLister helps list AddOnDeploymentConfigs. +// All objects returned here must be treated as read-only. +type AddOnDeploymentConfigLister interface { + // List lists all AddOnDeploymentConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.AddOnDeploymentConfig, err error) + // AddOnDeploymentConfigs returns an object that can list and get AddOnDeploymentConfigs. + AddOnDeploymentConfigs(namespace string) AddOnDeploymentConfigNamespaceLister + AddOnDeploymentConfigListerExpansion +} + +// addOnDeploymentConfigLister implements the AddOnDeploymentConfigLister interface. +type addOnDeploymentConfigLister struct { + indexer cache.Indexer +} + +// NewAddOnDeploymentConfigLister returns a new AddOnDeploymentConfigLister. +func NewAddOnDeploymentConfigLister(indexer cache.Indexer) AddOnDeploymentConfigLister { + return &addOnDeploymentConfigLister{indexer: indexer} +} + +// List lists all AddOnDeploymentConfigs in the indexer. +func (s *addOnDeploymentConfigLister) List(selector labels.Selector) (ret []*v1alpha1.AddOnDeploymentConfig, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.AddOnDeploymentConfig)) + }) + return ret, err +} + +// AddOnDeploymentConfigs returns an object that can list and get AddOnDeploymentConfigs. +func (s *addOnDeploymentConfigLister) AddOnDeploymentConfigs(namespace string) AddOnDeploymentConfigNamespaceLister { + return addOnDeploymentConfigNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// AddOnDeploymentConfigNamespaceLister helps list and get AddOnDeploymentConfigs. +// All objects returned here must be treated as read-only. 
+type AddOnDeploymentConfigNamespaceLister interface { + // List lists all AddOnDeploymentConfigs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.AddOnDeploymentConfig, err error) + // Get retrieves the AddOnDeploymentConfig from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.AddOnDeploymentConfig, error) + AddOnDeploymentConfigNamespaceListerExpansion +} + +// addOnDeploymentConfigNamespaceLister implements the AddOnDeploymentConfigNamespaceLister +// interface. +type addOnDeploymentConfigNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all AddOnDeploymentConfigs in the indexer for a given namespace. +func (s addOnDeploymentConfigNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.AddOnDeploymentConfig, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.AddOnDeploymentConfig)) + }) + return ret, err +} + +// Get retrieves the AddOnDeploymentConfig from the indexer for a given namespace and name. 
+func (s addOnDeploymentConfigNamespaceLister) Get(name string) (*v1alpha1.AddOnDeploymentConfig, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("addondeploymentconfig"), name) + } + return obj.(*v1alpha1.AddOnDeploymentConfig), nil +} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/addon/listers/addon/v1alpha1/expansion_generated.go b/controlplane/vendor/open-cluster-management.io/api/client/addon/listers/addon/v1alpha1/expansion_generated.go index 223114bb7..5c007a56c 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/addon/listers/addon/v1alpha1/expansion_generated.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/addon/listers/addon/v1alpha1/expansion_generated.go @@ -2,6 +2,14 @@ package v1alpha1 +// AddOnDeploymentConfigListerExpansion allows custom methods to be added to +// AddOnDeploymentConfigLister. +type AddOnDeploymentConfigListerExpansion interface{} + +// AddOnDeploymentConfigNamespaceListerExpansion allows custom methods to be added to +// AddOnDeploymentConfigNamespaceLister. +type AddOnDeploymentConfigNamespaceListerExpansion interface{} + // ClusterManagementAddOnListerExpansion allows custom methods to be added to // ClusterManagementAddOnLister. 
type ClusterManagementAddOnListerExpansion interface{} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/clientset.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/clientset.go index 9e52a0b92..dfc37ff6f 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/clientset.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/clientset.go @@ -12,6 +12,7 @@ import ( clusterv1 "open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1" clusterv1alpha1 "open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1" clusterv1beta1 "open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1" + clusterv1beta2 "open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2" ) type Interface interface { @@ -19,6 +20,7 @@ type Interface interface { ClusterV1() clusterv1.ClusterV1Interface ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface ClusterV1beta1() clusterv1beta1.ClusterV1beta1Interface + ClusterV1beta2() clusterv1beta2.ClusterV1beta2Interface } // Clientset contains the clients for groups. 
Each group has exactly one @@ -28,6 +30,7 @@ type Clientset struct { clusterV1 *clusterv1.ClusterV1Client clusterV1alpha1 *clusterv1alpha1.ClusterV1alpha1Client clusterV1beta1 *clusterv1beta1.ClusterV1beta1Client + clusterV1beta2 *clusterv1beta2.ClusterV1beta2Client } // ClusterV1 retrieves the ClusterV1Client @@ -45,6 +48,11 @@ func (c *Clientset) ClusterV1beta1() clusterv1beta1.ClusterV1beta1Interface { return c.clusterV1beta1 } +// ClusterV1beta2 retrieves the ClusterV1beta2Client +func (c *Clientset) ClusterV1beta2() clusterv1beta2.ClusterV1beta2Interface { + return c.clusterV1beta2 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -97,6 +105,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.clusterV1beta2, err = clusterv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -121,6 +133,7 @@ func New(c rest.Interface) *Clientset { cs.clusterV1 = clusterv1.New(c) cs.clusterV1alpha1 = clusterv1alpha1.New(c) cs.clusterV1beta1 = clusterv1beta1.New(c) + cs.clusterV1beta2 = clusterv1beta2.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/scheme/register.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/scheme/register.go index f8360b0e1..d52290cc6 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/scheme/register.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/scheme/register.go @@ -11,6 +11,7 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" clusterv1alpha1 
"open-cluster-management.io/api/cluster/v1alpha1" clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) var Scheme = runtime.NewScheme() @@ -20,6 +21,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ clusterv1.AddToScheme, clusterv1alpha1.AddToScheme, clusterv1beta1.AddToScheme, + clusterv1beta2.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go index 32c960a61..e072aa81f 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go @@ -14,10 +14,6 @@ type ClusterV1alpha1Interface interface { RESTClient() rest.Interface AddOnPlacementScoresGetter ClusterClaimsGetter - ManagedClusterSetsGetter - ManagedClusterSetBindingsGetter - PlacementsGetter - PlacementDecisionsGetter } // ClusterV1alpha1Client is used to interact with features provided by the cluster.open-cluster-management.io group. 
@@ -33,22 +29,6 @@ func (c *ClusterV1alpha1Client) ClusterClaims() ClusterClaimInterface { return newClusterClaims(c) } -func (c *ClusterV1alpha1Client) ManagedClusterSets() ManagedClusterSetInterface { - return newManagedClusterSets(c) -} - -func (c *ClusterV1alpha1Client) ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingInterface { - return newManagedClusterSetBindings(c, namespace) -} - -func (c *ClusterV1alpha1Client) Placements(namespace string) PlacementInterface { - return newPlacements(c, namespace) -} - -func (c *ClusterV1alpha1Client) PlacementDecisions(namespace string) PlacementDecisionInterface { - return newPlacementDecisions(c, namespace) -} - // NewForConfig creates a new ClusterV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go index 991d91b1c..e1db73fc9 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go @@ -5,11 +5,3 @@ package v1alpha1 type AddOnPlacementScoreExpansion interface{} type ClusterClaimExpansion interface{} - -type ManagedClusterSetExpansion interface{} - -type ManagedClusterSetBindingExpansion interface{} - -type PlacementExpansion interface{} - -type PlacementDecisionExpansion interface{} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/placement.go 
b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/placement.go deleted file mode 100644 index 46f1f7e37..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/placement.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" - v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" -) - -// PlacementsGetter has a method to return a PlacementInterface. -// A group's client should implement this interface. -type PlacementsGetter interface { - Placements(namespace string) PlacementInterface -} - -// PlacementInterface has methods to work with Placement resources. 
-type PlacementInterface interface { - Create(ctx context.Context, placement *v1alpha1.Placement, opts v1.CreateOptions) (*v1alpha1.Placement, error) - Update(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (*v1alpha1.Placement, error) - UpdateStatus(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (*v1alpha1.Placement, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Placement, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PlacementList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Placement, err error) - PlacementExpansion -} - -// placements implements PlacementInterface -type placements struct { - client rest.Interface - ns string -} - -// newPlacements returns a Placements -func newPlacements(c *ClusterV1alpha1Client, namespace string) *placements { - return &placements{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the placement, and returns the corresponding placement object, and an error if there is any. -func (c *placements) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Get(). - Namespace(c.ns). - Resource("placements"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Placements that match those selectors. 
-func (c *placements) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PlacementList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PlacementList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("placements"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested placements. -func (c *placements) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("placements"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a placement and creates it. Returns the server's representation of the placement, and an error, if there is any. -func (c *placements) Create(ctx context.Context, placement *v1alpha1.Placement, opts v1.CreateOptions) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Post(). - Namespace(c.ns). - Resource("placements"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placement). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a placement and updates it. Returns the server's representation of the placement, and an error, if there is any. -func (c *placements) Update(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Put(). - Namespace(c.ns). - Resource("placements"). - Name(placement.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placement). - Do(ctx). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *placements) UpdateStatus(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Put(). - Namespace(c.ns). - Resource("placements"). - Name(placement.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placement). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the placement and deletes it. Returns an error if one occurs. -func (c *placements) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("placements"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *placements) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("placements"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched placement. -func (c *placements) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("placements"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/placementdecision.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/placementdecision.go deleted file mode 100644 index 72c96a26e..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/placementdecision.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" - v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" -) - -// PlacementDecisionsGetter has a method to return a PlacementDecisionInterface. -// A group's client should implement this interface. -type PlacementDecisionsGetter interface { - PlacementDecisions(namespace string) PlacementDecisionInterface -} - -// PlacementDecisionInterface has methods to work with PlacementDecision resources. 
-type PlacementDecisionInterface interface { - Create(ctx context.Context, placementDecision *v1alpha1.PlacementDecision, opts v1.CreateOptions) (*v1alpha1.PlacementDecision, error) - Update(ctx context.Context, placementDecision *v1alpha1.PlacementDecision, opts v1.UpdateOptions) (*v1alpha1.PlacementDecision, error) - UpdateStatus(ctx context.Context, placementDecision *v1alpha1.PlacementDecision, opts v1.UpdateOptions) (*v1alpha1.PlacementDecision, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PlacementDecision, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PlacementDecisionList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PlacementDecision, err error) - PlacementDecisionExpansion -} - -// placementDecisions implements PlacementDecisionInterface -type placementDecisions struct { - client rest.Interface - ns string -} - -// newPlacementDecisions returns a PlacementDecisions -func newPlacementDecisions(c *ClusterV1alpha1Client, namespace string) *placementDecisions { - return &placementDecisions{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the placementDecision, and returns the corresponding placementDecision object, and an error if there is any. -func (c *placementDecisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PlacementDecision, err error) { - result = &v1alpha1.PlacementDecision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("placementdecisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of PlacementDecisions that match those selectors. -func (c *placementDecisions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PlacementDecisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PlacementDecisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("placementdecisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested placementDecisions. -func (c *placementDecisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("placementdecisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a placementDecision and creates it. Returns the server's representation of the placementDecision, and an error, if there is any. -func (c *placementDecisions) Create(ctx context.Context, placementDecision *v1alpha1.PlacementDecision, opts v1.CreateOptions) (result *v1alpha1.PlacementDecision, err error) { - result = &v1alpha1.PlacementDecision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("placementdecisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placementDecision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a placementDecision and updates it. Returns the server's representation of the placementDecision, and an error, if there is any. 
-func (c *placementDecisions) Update(ctx context.Context, placementDecision *v1alpha1.PlacementDecision, opts v1.UpdateOptions) (result *v1alpha1.PlacementDecision, err error) { - result = &v1alpha1.PlacementDecision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("placementdecisions"). - Name(placementDecision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placementDecision). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *placementDecisions) UpdateStatus(ctx context.Context, placementDecision *v1alpha1.PlacementDecision, opts v1.UpdateOptions) (result *v1alpha1.PlacementDecision, err error) { - result = &v1alpha1.PlacementDecision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("placementdecisions"). - Name(placementDecision.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placementDecision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the placementDecision and deletes it. Returns an error if one occurs. -func (c *placementDecisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("placementdecisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *placementDecisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("placementdecisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched placementDecision. 
-func (c *placementDecisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PlacementDecision, err error) { - result = &v1alpha1.PlacementDecision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("placementdecisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/cluster_client.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/cluster_client.go new file mode 100644 index 000000000..e68271b68 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/cluster_client.go @@ -0,0 +1,96 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" + v1beta2 "open-cluster-management.io/api/cluster/v1beta2" +) + +type ClusterV1beta2Interface interface { + RESTClient() rest.Interface + ManagedClusterSetsGetter + ManagedClusterSetBindingsGetter +} + +// ClusterV1beta2Client is used to interact with features provided by the cluster.open-cluster-management.io group. +type ClusterV1beta2Client struct { + restClient rest.Interface +} + +func (c *ClusterV1beta2Client) ManagedClusterSets() ManagedClusterSetInterface { + return newManagedClusterSets(c) +} + +func (c *ClusterV1beta2Client) ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingInterface { + return newManagedClusterSetBindings(c, namespace) +} + +// NewForConfig creates a new ClusterV1beta2Client for the given config. 
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ClusterV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ClusterV1beta2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ClusterV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ClusterV1beta2Client{client}, nil +} + +// NewForConfigOrDie creates a new ClusterV1beta2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ClusterV1beta2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ClusterV1beta2Client for the given RESTClient. +func New(c rest.Interface) *ClusterV1beta2Client { + return &ClusterV1beta2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *ClusterV1beta2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/doc.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/doc.go new file mode 100644 index 000000000..ebe38377f --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta2 diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/generated_expansion.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/generated_expansion.go new file mode 100644 index 000000000..c651d50b1 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/generated_expansion.go @@ -0,0 +1,7 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta2 + +type ManagedClusterSetExpansion interface{} + +type ManagedClusterSetBindingExpansion interface{} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/managedclusterset.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/managedclusterset.go similarity index 76% rename from controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/managedclusterset.go rename to controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/managedclusterset.go index a284783f7..5ca1be32a 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/managedclusterset.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/managedclusterset.go @@ -1,6 +1,6 @@ // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1beta2 import ( "context" @@ -11,7 +11,7 @@ import ( watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" scheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" - v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + v1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) // ManagedClusterSetsGetter has a method to return a ManagedClusterSetInterface. @@ -22,15 +22,15 @@ type ManagedClusterSetsGetter interface { // ManagedClusterSetInterface has methods to work with ManagedClusterSet resources. 
type ManagedClusterSetInterface interface { - Create(ctx context.Context, managedClusterSet *v1alpha1.ManagedClusterSet, opts v1.CreateOptions) (*v1alpha1.ManagedClusterSet, error) - Update(ctx context.Context, managedClusterSet *v1alpha1.ManagedClusterSet, opts v1.UpdateOptions) (*v1alpha1.ManagedClusterSet, error) - UpdateStatus(ctx context.Context, managedClusterSet *v1alpha1.ManagedClusterSet, opts v1.UpdateOptions) (*v1alpha1.ManagedClusterSet, error) + Create(ctx context.Context, managedClusterSet *v1beta2.ManagedClusterSet, opts v1.CreateOptions) (*v1beta2.ManagedClusterSet, error) + Update(ctx context.Context, managedClusterSet *v1beta2.ManagedClusterSet, opts v1.UpdateOptions) (*v1beta2.ManagedClusterSet, error) + UpdateStatus(ctx context.Context, managedClusterSet *v1beta2.ManagedClusterSet, opts v1.UpdateOptions) (*v1beta2.ManagedClusterSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ManagedClusterSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ManagedClusterSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ManagedClusterSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ManagedClusterSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ManagedClusterSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ManagedClusterSet, err error) ManagedClusterSetExpansion } @@ -40,15 +40,15 @@ type managedClusterSets struct { } // newManagedClusterSets returns a ManagedClusterSets -func newManagedClusterSets(c 
*ClusterV1alpha1Client) *managedClusterSets { +func newManagedClusterSets(c *ClusterV1beta2Client) *managedClusterSets { return &managedClusterSets{ client: c.RESTClient(), } } // Get takes name of the managedClusterSet, and returns the corresponding managedClusterSet object, and an error if there is any. -func (c *managedClusterSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ManagedClusterSet, err error) { - result = &v1alpha1.ManagedClusterSet{} +func (c *managedClusterSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ManagedClusterSet, err error) { + result = &v1beta2.ManagedClusterSet{} err = c.client.Get(). Resource("managedclustersets"). Name(name). @@ -59,12 +59,12 @@ func (c *managedClusterSets) Get(ctx context.Context, name string, options v1.Ge } // List takes label and field selectors, and returns the list of ManagedClusterSets that match those selectors. -func (c *managedClusterSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ManagedClusterSetList, err error) { +func (c *managedClusterSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ManagedClusterSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ManagedClusterSetList{} + result = &v1beta2.ManagedClusterSetList{} err = c.client.Get(). Resource("managedclustersets"). VersionedParams(&opts, scheme.ParameterCodec). @@ -89,8 +89,8 @@ func (c *managedClusterSets) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a managedClusterSet and creates it. Returns the server's representation of the managedClusterSet, and an error, if there is any. 
-func (c *managedClusterSets) Create(ctx context.Context, managedClusterSet *v1alpha1.ManagedClusterSet, opts v1.CreateOptions) (result *v1alpha1.ManagedClusterSet, err error) { - result = &v1alpha1.ManagedClusterSet{} +func (c *managedClusterSets) Create(ctx context.Context, managedClusterSet *v1beta2.ManagedClusterSet, opts v1.CreateOptions) (result *v1beta2.ManagedClusterSet, err error) { + result = &v1beta2.ManagedClusterSet{} err = c.client.Post(). Resource("managedclustersets"). VersionedParams(&opts, scheme.ParameterCodec). @@ -101,8 +101,8 @@ func (c *managedClusterSets) Create(ctx context.Context, managedClusterSet *v1al } // Update takes the representation of a managedClusterSet and updates it. Returns the server's representation of the managedClusterSet, and an error, if there is any. -func (c *managedClusterSets) Update(ctx context.Context, managedClusterSet *v1alpha1.ManagedClusterSet, opts v1.UpdateOptions) (result *v1alpha1.ManagedClusterSet, err error) { - result = &v1alpha1.ManagedClusterSet{} +func (c *managedClusterSets) Update(ctx context.Context, managedClusterSet *v1beta2.ManagedClusterSet, opts v1.UpdateOptions) (result *v1beta2.ManagedClusterSet, err error) { + result = &v1beta2.ManagedClusterSet{} err = c.client.Put(). Resource("managedclustersets"). Name(managedClusterSet.Name). @@ -115,8 +115,8 @@ func (c *managedClusterSets) Update(ctx context.Context, managedClusterSet *v1al // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *managedClusterSets) UpdateStatus(ctx context.Context, managedClusterSet *v1alpha1.ManagedClusterSet, opts v1.UpdateOptions) (result *v1alpha1.ManagedClusterSet, err error) { - result = &v1alpha1.ManagedClusterSet{} +func (c *managedClusterSets) UpdateStatus(ctx context.Context, managedClusterSet *v1beta2.ManagedClusterSet, opts v1.UpdateOptions) (result *v1beta2.ManagedClusterSet, err error) { + result = &v1beta2.ManagedClusterSet{} err = c.client.Put(). Resource("managedclustersets"). Name(managedClusterSet.Name). @@ -154,8 +154,8 @@ func (c *managedClusterSets) DeleteCollection(ctx context.Context, opts v1.Delet } // Patch applies the patch and returns the patched managedClusterSet. -func (c *managedClusterSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ManagedClusterSet, err error) { - result = &v1alpha1.ManagedClusterSet{} +func (c *managedClusterSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ManagedClusterSet, err error) { + result = &v1beta2.ManagedClusterSet{} err = c.client.Patch(pt). Resource("managedclustersets"). Name(name). 
diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/managedclustersetbinding.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/managedclustersetbinding.go similarity index 68% rename from controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/managedclustersetbinding.go rename to controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/managedclustersetbinding.go index 102c9299c..eb24f1250 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1alpha1/managedclustersetbinding.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta2/managedclustersetbinding.go @@ -1,6 +1,6 @@ // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1beta2 import ( "context" @@ -11,7 +11,7 @@ import ( watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" scheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" - v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + v1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) // ManagedClusterSetBindingsGetter has a method to return a ManagedClusterSetBindingInterface. @@ -22,14 +22,15 @@ type ManagedClusterSetBindingsGetter interface { // ManagedClusterSetBindingInterface has methods to work with ManagedClusterSetBinding resources. 
type ManagedClusterSetBindingInterface interface { - Create(ctx context.Context, managedClusterSetBinding *v1alpha1.ManagedClusterSetBinding, opts v1.CreateOptions) (*v1alpha1.ManagedClusterSetBinding, error) - Update(ctx context.Context, managedClusterSetBinding *v1alpha1.ManagedClusterSetBinding, opts v1.UpdateOptions) (*v1alpha1.ManagedClusterSetBinding, error) + Create(ctx context.Context, managedClusterSetBinding *v1beta2.ManagedClusterSetBinding, opts v1.CreateOptions) (*v1beta2.ManagedClusterSetBinding, error) + Update(ctx context.Context, managedClusterSetBinding *v1beta2.ManagedClusterSetBinding, opts v1.UpdateOptions) (*v1beta2.ManagedClusterSetBinding, error) + UpdateStatus(ctx context.Context, managedClusterSetBinding *v1beta2.ManagedClusterSetBinding, opts v1.UpdateOptions) (*v1beta2.ManagedClusterSetBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ManagedClusterSetBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ManagedClusterSetBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ManagedClusterSetBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ManagedClusterSetBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ManagedClusterSetBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ManagedClusterSetBinding, err error) ManagedClusterSetBindingExpansion } @@ -40,7 +41,7 @@ type managedClusterSetBindings struct { } // newManagedClusterSetBindings returns a ManagedClusterSetBindings -func 
newManagedClusterSetBindings(c *ClusterV1alpha1Client, namespace string) *managedClusterSetBindings { +func newManagedClusterSetBindings(c *ClusterV1beta2Client, namespace string) *managedClusterSetBindings { return &managedClusterSetBindings{ client: c.RESTClient(), ns: namespace, @@ -48,8 +49,8 @@ func newManagedClusterSetBindings(c *ClusterV1alpha1Client, namespace string) *m } // Get takes name of the managedClusterSetBinding, and returns the corresponding managedClusterSetBinding object, and an error if there is any. -func (c *managedClusterSetBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ManagedClusterSetBinding, err error) { - result = &v1alpha1.ManagedClusterSetBinding{} +func (c *managedClusterSetBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ManagedClusterSetBinding, err error) { + result = &v1beta2.ManagedClusterSetBinding{} err = c.client.Get(). Namespace(c.ns). Resource("managedclustersetbindings"). @@ -61,12 +62,12 @@ func (c *managedClusterSetBindings) Get(ctx context.Context, name string, option } // List takes label and field selectors, and returns the list of ManagedClusterSetBindings that match those selectors. -func (c *managedClusterSetBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ManagedClusterSetBindingList, err error) { +func (c *managedClusterSetBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ManagedClusterSetBindingList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ManagedClusterSetBindingList{} + result = &v1beta2.ManagedClusterSetBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("managedclustersetbindings"). 
@@ -93,8 +94,8 @@ func (c *managedClusterSetBindings) Watch(ctx context.Context, opts v1.ListOptio } // Create takes the representation of a managedClusterSetBinding and creates it. Returns the server's representation of the managedClusterSetBinding, and an error, if there is any. -func (c *managedClusterSetBindings) Create(ctx context.Context, managedClusterSetBinding *v1alpha1.ManagedClusterSetBinding, opts v1.CreateOptions) (result *v1alpha1.ManagedClusterSetBinding, err error) { - result = &v1alpha1.ManagedClusterSetBinding{} +func (c *managedClusterSetBindings) Create(ctx context.Context, managedClusterSetBinding *v1beta2.ManagedClusterSetBinding, opts v1.CreateOptions) (result *v1beta2.ManagedClusterSetBinding, err error) { + result = &v1beta2.ManagedClusterSetBinding{} err = c.client.Post(). Namespace(c.ns). Resource("managedclustersetbindings"). @@ -106,8 +107,8 @@ func (c *managedClusterSetBindings) Create(ctx context.Context, managedClusterSe } // Update takes the representation of a managedClusterSetBinding and updates it. Returns the server's representation of the managedClusterSetBinding, and an error, if there is any. -func (c *managedClusterSetBindings) Update(ctx context.Context, managedClusterSetBinding *v1alpha1.ManagedClusterSetBinding, opts v1.UpdateOptions) (result *v1alpha1.ManagedClusterSetBinding, err error) { - result = &v1alpha1.ManagedClusterSetBinding{} +func (c *managedClusterSetBindings) Update(ctx context.Context, managedClusterSetBinding *v1beta2.ManagedClusterSetBinding, opts v1.UpdateOptions) (result *v1beta2.ManagedClusterSetBinding, err error) { + result = &v1beta2.ManagedClusterSetBinding{} err = c.client.Put(). Namespace(c.ns). Resource("managedclustersetbindings"). @@ -119,6 +120,22 @@ func (c *managedClusterSetBindings) Update(ctx context.Context, managedClusterSe return } +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *managedClusterSetBindings) UpdateStatus(ctx context.Context, managedClusterSetBinding *v1beta2.ManagedClusterSetBinding, opts v1.UpdateOptions) (result *v1beta2.ManagedClusterSetBinding, err error) { + result = &v1beta2.ManagedClusterSetBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + Name(managedClusterSetBinding.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedClusterSetBinding). + Do(ctx). + Into(result) + return +} + // Delete takes name of the managedClusterSetBinding and deletes it. Returns an error if one occurs. func (c *managedClusterSetBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). @@ -147,8 +164,8 @@ func (c *managedClusterSetBindings) DeleteCollection(ctx context.Context, opts v } // Patch applies the patch and returns the patched managedClusterSetBinding. -func (c *managedClusterSetBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ManagedClusterSetBinding, err error) { - result = &v1alpha1.ManagedClusterSetBinding{} +func (c *managedClusterSetBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ManagedClusterSetBinding, err error) { + result = &v1beta2.ManagedClusterSetBinding{} err = c.client.Patch(pt). Namespace(c.ns). Resource("managedclustersetbindings"). 
diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/interface.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/interface.go index 775959fb8..e40077311 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/interface.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/interface.go @@ -6,6 +6,7 @@ import ( v1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" v1alpha1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1" v1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1" + v1beta2 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2" internalinterfaces "open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces" ) @@ -17,6 +18,8 @@ type Interface interface { V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface + // V1beta2 provides access to shared informers for resources in V1beta2. + V1beta2() v1beta2.Interface } type group struct { @@ -44,3 +47,8 @@ func (g *group) V1alpha1() v1alpha1.Interface { func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta2 returns a new v1beta2.Interface. 
+func (g *group) V1beta2() v1beta2.Interface { + return v1beta2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/interface.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/interface.go index e3b2c468f..a475c351e 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/interface.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/interface.go @@ -12,14 +12,6 @@ type Interface interface { AddOnPlacementScores() AddOnPlacementScoreInformer // ClusterClaims returns a ClusterClaimInformer. ClusterClaims() ClusterClaimInformer - // ManagedClusterSets returns a ManagedClusterSetInformer. - ManagedClusterSets() ManagedClusterSetInformer - // ManagedClusterSetBindings returns a ManagedClusterSetBindingInformer. - ManagedClusterSetBindings() ManagedClusterSetBindingInformer - // Placements returns a PlacementInformer. - Placements() PlacementInformer - // PlacementDecisions returns a PlacementDecisionInformer. - PlacementDecisions() PlacementDecisionInformer } type version struct { @@ -42,23 +34,3 @@ func (v *version) AddOnPlacementScores() AddOnPlacementScoreInformer { func (v *version) ClusterClaims() ClusterClaimInformer { return &clusterClaimInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } - -// ManagedClusterSets returns a ManagedClusterSetInformer. -func (v *version) ManagedClusterSets() ManagedClusterSetInformer { - return &managedClusterSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// ManagedClusterSetBindings returns a ManagedClusterSetBindingInformer. 
-func (v *version) ManagedClusterSetBindings() ManagedClusterSetBindingInformer { - return &managedClusterSetBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// Placements returns a PlacementInformer. -func (v *version) Placements() PlacementInformer { - return &placementInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// PlacementDecisions returns a PlacementDecisionInformer. -func (v *version) PlacementDecisions() PlacementDecisionInformer { - return &placementDecisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/placement.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/placement.go deleted file mode 100644 index 0145fbf07..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/placement.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - versioned "open-cluster-management.io/api/client/cluster/clientset/versioned" - internalinterfaces "open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces" - v1alpha1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1" - clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" -) - -// PlacementInformer provides access to a shared informer and lister for -// Placements. 
-type PlacementInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.PlacementLister -} - -type placementInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewPlacementInformer constructs a new informer for Placement type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPlacementInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPlacementInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredPlacementInformer constructs a new informer for Placement type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredPlacementInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Placements(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Placements(namespace).Watch(context.TODO(), options) - }, - }, - &clusterv1alpha1.Placement{}, - resyncPeriod, - indexers, - ) -} - -func (f *placementInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPlacementInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *placementInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.Placement{}, f.defaultInformer) -} - -func (f *placementInformer) Lister() v1alpha1.PlacementLister { - return v1alpha1.NewPlacementLister(f.Informer().GetIndexer()) -} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/placementdecision.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/placementdecision.go deleted file mode 100644 index 8a25ce04b..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/placementdecision.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - versioned "open-cluster-management.io/api/client/cluster/clientset/versioned" - internalinterfaces "open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces" - v1alpha1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1" - clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" -) - -// PlacementDecisionInformer provides access to a shared informer and lister for -// PlacementDecisions. -type PlacementDecisionInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.PlacementDecisionLister -} - -type placementDecisionInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewPlacementDecisionInformer constructs a new informer for PlacementDecision type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPlacementDecisionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPlacementDecisionInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredPlacementDecisionInformer constructs a new informer for PlacementDecision type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredPlacementDecisionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().PlacementDecisions(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().PlacementDecisions(namespace).Watch(context.TODO(), options) - }, - }, - &clusterv1alpha1.PlacementDecision{}, - resyncPeriod, - indexers, - ) -} - -func (f *placementDecisionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPlacementDecisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *placementDecisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.PlacementDecision{}, f.defaultInformer) -} - -func (f *placementDecisionInformer) Lister() v1alpha1.PlacementDecisionLister { - return v1alpha1.NewPlacementDecisionLister(f.Informer().GetIndexer()) -} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/interface.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/interface.go new file mode 100644 index 000000000..95835b643 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + internalinterfaces "open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ManagedClusterSets returns a ManagedClusterSetInformer. + ManagedClusterSets() ManagedClusterSetInformer + // ManagedClusterSetBindings returns a ManagedClusterSetBindingInformer. + ManagedClusterSetBindings() ManagedClusterSetBindingInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ManagedClusterSets returns a ManagedClusterSetInformer. +func (v *version) ManagedClusterSets() ManagedClusterSetInformer { + return &managedClusterSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ManagedClusterSetBindings returns a ManagedClusterSetBindingInformer. 
+func (v *version) ManagedClusterSetBindings() ManagedClusterSetBindingInformer { + return &managedClusterSetBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/managedclusterset.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/managedclusterset.go similarity index 79% rename from controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/managedclusterset.go rename to controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/managedclusterset.go index 590481c8e..4afcea2b8 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/managedclusterset.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/managedclusterset.go @@ -1,6 +1,6 @@ // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1beta2 import ( "context" @@ -12,15 +12,15 @@ import ( cache "k8s.io/client-go/tools/cache" versioned "open-cluster-management.io/api/client/cluster/clientset/versioned" internalinterfaces "open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces" - v1alpha1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1" - clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + v1beta2 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2" + clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) // ManagedClusterSetInformer provides access to a shared informer and lister for // ManagedClusterSets. 
type ManagedClusterSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ManagedClusterSetLister + Lister() v1beta2.ManagedClusterSetLister } type managedClusterSetInformer struct { @@ -45,16 +45,16 @@ func NewFilteredManagedClusterSetInformer(client versioned.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.ClusterV1alpha1().ManagedClusterSets().List(context.TODO(), options) + return client.ClusterV1beta2().ManagedClusterSets().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ClusterV1alpha1().ManagedClusterSets().Watch(context.TODO(), options) + return client.ClusterV1beta2().ManagedClusterSets().Watch(context.TODO(), options) }, }, - &clusterv1alpha1.ManagedClusterSet{}, + &clusterv1beta2.ManagedClusterSet{}, resyncPeriod, indexers, ) @@ -65,9 +65,9 @@ func (f *managedClusterSetInformer) defaultInformer(client versioned.Interface, } func (f *managedClusterSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.ManagedClusterSet{}, f.defaultInformer) + return f.factory.InformerFor(&clusterv1beta2.ManagedClusterSet{}, f.defaultInformer) } -func (f *managedClusterSetInformer) Lister() v1alpha1.ManagedClusterSetLister { - return v1alpha1.NewManagedClusterSetLister(f.Informer().GetIndexer()) +func (f *managedClusterSetInformer) Lister() v1beta2.ManagedClusterSetLister { + return v1beta2.NewManagedClusterSetLister(f.Informer().GetIndexer()) } diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/managedclustersetbinding.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/managedclustersetbinding.go similarity index 78% rename from 
controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/managedclustersetbinding.go rename to controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/managedclustersetbinding.go index 605cd1fd4..be3c0051e 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1alpha1/managedclustersetbinding.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2/managedclustersetbinding.go @@ -1,6 +1,6 @@ // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1beta2 import ( "context" @@ -12,15 +12,15 @@ import ( cache "k8s.io/client-go/tools/cache" versioned "open-cluster-management.io/api/client/cluster/clientset/versioned" internalinterfaces "open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces" - v1alpha1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1" - clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + v1beta2 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2" + clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) // ManagedClusterSetBindingInformer provides access to a shared informer and lister for // ManagedClusterSetBindings. 
type ManagedClusterSetBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ManagedClusterSetBindingLister + Lister() v1beta2.ManagedClusterSetBindingLister } type managedClusterSetBindingInformer struct { @@ -46,16 +46,16 @@ func NewFilteredManagedClusterSetBindingInformer(client versioned.Interface, nam if tweakListOptions != nil { tweakListOptions(&options) } - return client.ClusterV1alpha1().ManagedClusterSetBindings(namespace).List(context.TODO(), options) + return client.ClusterV1beta2().ManagedClusterSetBindings(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ClusterV1alpha1().ManagedClusterSetBindings(namespace).Watch(context.TODO(), options) + return client.ClusterV1beta2().ManagedClusterSetBindings(namespace).Watch(context.TODO(), options) }, }, - &clusterv1alpha1.ManagedClusterSetBinding{}, + &clusterv1beta2.ManagedClusterSetBinding{}, resyncPeriod, indexers, ) @@ -66,9 +66,9 @@ func (f *managedClusterSetBindingInformer) defaultInformer(client versioned.Inte } func (f *managedClusterSetBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.ManagedClusterSetBinding{}, f.defaultInformer) + return f.factory.InformerFor(&clusterv1beta2.ManagedClusterSetBinding{}, f.defaultInformer) } -func (f *managedClusterSetBindingInformer) Lister() v1alpha1.ManagedClusterSetBindingLister { - return v1alpha1.NewManagedClusterSetBindingLister(f.Informer().GetIndexer()) +func (f *managedClusterSetBindingInformer) Lister() v1beta2.ManagedClusterSetBindingLister { + return v1beta2.NewManagedClusterSetBindingLister(f.Informer().GetIndexer()) } diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/generic.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/generic.go 
index 88388cb22..6956a771d 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/generic.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/informers/externalversions/generic.go @@ -10,6 +10,7 @@ import ( v1 "open-cluster-management.io/api/cluster/v1" v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" v1beta1 "open-cluster-management.io/api/cluster/v1beta1" + v1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other @@ -47,14 +48,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().AddOnPlacementScores().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("clusterclaims"): return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().ClusterClaims().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("managedclustersets"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().ManagedClusterSets().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("managedclustersetbindings"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().ManagedClusterSetBindings().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("placements"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Placements().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("placementdecisions"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().PlacementDecisions().Informer()}, nil // Group=cluster.open-cluster-management.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("managedclustersets"): @@ -66,6 +59,12 @@ func (f 
*sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1beta1.SchemeGroupVersion.WithResource("placementdecisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1beta1().PlacementDecisions().Informer()}, nil + // Group=cluster.open-cluster-management.io, Version=v1beta2 + case v1beta2.SchemeGroupVersion.WithResource("managedclustersets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1beta2().ManagedClusterSets().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("managedclustersetbindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1beta2().ManagedClusterSetBindings().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/expansion_generated.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/expansion_generated.go index 59b98eab2..387ae5c78 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/expansion_generated.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/expansion_generated.go @@ -13,31 +13,3 @@ type AddOnPlacementScoreNamespaceListerExpansion interface{} // ClusterClaimListerExpansion allows custom methods to be added to // ClusterClaimLister. type ClusterClaimListerExpansion interface{} - -// ManagedClusterSetListerExpansion allows custom methods to be added to -// ManagedClusterSetLister. -type ManagedClusterSetListerExpansion interface{} - -// ManagedClusterSetBindingListerExpansion allows custom methods to be added to -// ManagedClusterSetBindingLister. 
-type ManagedClusterSetBindingListerExpansion interface{} - -// ManagedClusterSetBindingNamespaceListerExpansion allows custom methods to be added to -// ManagedClusterSetBindingNamespaceLister. -type ManagedClusterSetBindingNamespaceListerExpansion interface{} - -// PlacementListerExpansion allows custom methods to be added to -// PlacementLister. -type PlacementListerExpansion interface{} - -// PlacementNamespaceListerExpansion allows custom methods to be added to -// PlacementNamespaceLister. -type PlacementNamespaceListerExpansion interface{} - -// PlacementDecisionListerExpansion allows custom methods to be added to -// PlacementDecisionLister. -type PlacementDecisionListerExpansion interface{} - -// PlacementDecisionNamespaceListerExpansion allows custom methods to be added to -// PlacementDecisionNamespaceLister. -type PlacementDecisionNamespaceListerExpansion interface{} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/placement.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/placement.go deleted file mode 100644 index 70b7e12ab..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/placement.go +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" -) - -// PlacementLister helps list Placements. -// All objects returned here must be treated as read-only. -type PlacementLister interface { - // List lists all Placements in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Placement, err error) - // Placements returns an object that can list and get Placements. 
- Placements(namespace string) PlacementNamespaceLister - PlacementListerExpansion -} - -// placementLister implements the PlacementLister interface. -type placementLister struct { - indexer cache.Indexer -} - -// NewPlacementLister returns a new PlacementLister. -func NewPlacementLister(indexer cache.Indexer) PlacementLister { - return &placementLister{indexer: indexer} -} - -// List lists all Placements in the indexer. -func (s *placementLister) List(selector labels.Selector) (ret []*v1alpha1.Placement, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Placement)) - }) - return ret, err -} - -// Placements returns an object that can list and get Placements. -func (s *placementLister) Placements(namespace string) PlacementNamespaceLister { - return placementNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// PlacementNamespaceLister helps list and get Placements. -// All objects returned here must be treated as read-only. -type PlacementNamespaceLister interface { - // List lists all Placements in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Placement, err error) - // Get retrieves the Placement from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.Placement, error) - PlacementNamespaceListerExpansion -} - -// placementNamespaceLister implements the PlacementNamespaceLister -// interface. -type placementNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Placements in the indexer for a given namespace. 
-func (s placementNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Placement, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Placement)) - }) - return ret, err -} - -// Get retrieves the Placement from the indexer for a given namespace and name. -func (s placementNamespaceLister) Get(name string) (*v1alpha1.Placement, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("placement"), name) - } - return obj.(*v1alpha1.Placement), nil -} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/placementdecision.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/placementdecision.go deleted file mode 100644 index 66b1db244..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/placementdecision.go +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" -) - -// PlacementDecisionLister helps list PlacementDecisions. -// All objects returned here must be treated as read-only. -type PlacementDecisionLister interface { - // List lists all PlacementDecisions in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PlacementDecision, err error) - // PlacementDecisions returns an object that can list and get PlacementDecisions. - PlacementDecisions(namespace string) PlacementDecisionNamespaceLister - PlacementDecisionListerExpansion -} - -// placementDecisionLister implements the PlacementDecisionLister interface. 
-type placementDecisionLister struct { - indexer cache.Indexer -} - -// NewPlacementDecisionLister returns a new PlacementDecisionLister. -func NewPlacementDecisionLister(indexer cache.Indexer) PlacementDecisionLister { - return &placementDecisionLister{indexer: indexer} -} - -// List lists all PlacementDecisions in the indexer. -func (s *placementDecisionLister) List(selector labels.Selector) (ret []*v1alpha1.PlacementDecision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PlacementDecision)) - }) - return ret, err -} - -// PlacementDecisions returns an object that can list and get PlacementDecisions. -func (s *placementDecisionLister) PlacementDecisions(namespace string) PlacementDecisionNamespaceLister { - return placementDecisionNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// PlacementDecisionNamespaceLister helps list and get PlacementDecisions. -// All objects returned here must be treated as read-only. -type PlacementDecisionNamespaceLister interface { - // List lists all PlacementDecisions in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PlacementDecision, err error) - // Get retrieves the PlacementDecision from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.PlacementDecision, error) - PlacementDecisionNamespaceListerExpansion -} - -// placementDecisionNamespaceLister implements the PlacementDecisionNamespaceLister -// interface. -type placementDecisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PlacementDecisions in the indexer for a given namespace. 
-func (s placementDecisionNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PlacementDecision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PlacementDecision)) - }) - return ret, err -} - -// Get retrieves the PlacementDecision from the indexer for a given namespace and name. -func (s placementDecisionNamespaceLister) Get(name string) (*v1alpha1.PlacementDecision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("placementdecision"), name) - } - return obj.(*v1alpha1.PlacementDecision), nil -} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/expansion_generated.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/expansion_generated.go new file mode 100644 index 000000000..b6e81af91 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/expansion_generated.go @@ -0,0 +1,15 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta2 + +// ManagedClusterSetListerExpansion allows custom methods to be added to +// ManagedClusterSetLister. +type ManagedClusterSetListerExpansion interface{} + +// ManagedClusterSetBindingListerExpansion allows custom methods to be added to +// ManagedClusterSetBindingLister. +type ManagedClusterSetBindingListerExpansion interface{} + +// ManagedClusterSetBindingNamespaceListerExpansion allows custom methods to be added to +// ManagedClusterSetBindingNamespaceLister. 
+type ManagedClusterSetBindingNamespaceListerExpansion interface{} diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/managedclusterset.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/managedclusterset.go similarity index 71% rename from controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/managedclusterset.go rename to controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/managedclusterset.go index 567a6d4ba..05878184c 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/managedclusterset.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/managedclusterset.go @@ -1,12 +1,12 @@ // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1beta2 import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + v1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) // ManagedClusterSetLister helps list ManagedClusterSets. @@ -14,10 +14,10 @@ import ( type ManagedClusterSetLister interface { // List lists all ManagedClusterSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ManagedClusterSet, err error) + List(selector labels.Selector) (ret []*v1beta2.ManagedClusterSet, err error) // Get retrieves the ManagedClusterSet from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ManagedClusterSet, error) + Get(name string) (*v1beta2.ManagedClusterSet, error) ManagedClusterSetListerExpansion } @@ -32,21 +32,21 @@ func NewManagedClusterSetLister(indexer cache.Indexer) ManagedClusterSetLister { } // List lists all ManagedClusterSets in the indexer. 
-func (s *managedClusterSetLister) List(selector labels.Selector) (ret []*v1alpha1.ManagedClusterSet, err error) { +func (s *managedClusterSetLister) List(selector labels.Selector) (ret []*v1beta2.ManagedClusterSet, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ManagedClusterSet)) + ret = append(ret, m.(*v1beta2.ManagedClusterSet)) }) return ret, err } // Get retrieves the ManagedClusterSet from the index for a given name. -func (s *managedClusterSetLister) Get(name string) (*v1alpha1.ManagedClusterSet, error) { +func (s *managedClusterSetLister) Get(name string) (*v1beta2.ManagedClusterSet, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("managedclusterset"), name) + return nil, errors.NewNotFound(v1beta2.Resource("managedclusterset"), name) } - return obj.(*v1alpha1.ManagedClusterSet), nil + return obj.(*v1beta2.ManagedClusterSet), nil } diff --git a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/managedclustersetbinding.go b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/managedclustersetbinding.go similarity index 79% rename from controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/managedclustersetbinding.go rename to controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/managedclustersetbinding.go index 27bfc74da..1ffdcd512 100644 --- a/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1/managedclustersetbinding.go +++ b/controlplane/vendor/open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2/managedclustersetbinding.go @@ -1,12 +1,12 @@ // Code generated by lister-gen. DO NOT EDIT. 
-package v1alpha1 +package v1beta2 import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - v1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + v1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) // ManagedClusterSetBindingLister helps list ManagedClusterSetBindings. @@ -14,7 +14,7 @@ import ( type ManagedClusterSetBindingLister interface { // List lists all ManagedClusterSetBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ManagedClusterSetBinding, err error) + List(selector labels.Selector) (ret []*v1beta2.ManagedClusterSetBinding, err error) // ManagedClusterSetBindings returns an object that can list and get ManagedClusterSetBindings. ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingNamespaceLister ManagedClusterSetBindingListerExpansion @@ -31,9 +31,9 @@ func NewManagedClusterSetBindingLister(indexer cache.Indexer) ManagedClusterSetB } // List lists all ManagedClusterSetBindings in the indexer. -func (s *managedClusterSetBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ManagedClusterSetBinding, err error) { +func (s *managedClusterSetBindingLister) List(selector labels.Selector) (ret []*v1beta2.ManagedClusterSetBinding, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ManagedClusterSetBinding)) + ret = append(ret, m.(*v1beta2.ManagedClusterSetBinding)) }) return ret, err } @@ -48,10 +48,10 @@ func (s *managedClusterSetBindingLister) ManagedClusterSetBindings(namespace str type ManagedClusterSetBindingNamespaceLister interface { // List lists all ManagedClusterSetBindings in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1alpha1.ManagedClusterSetBinding, err error) + List(selector labels.Selector) (ret []*v1beta2.ManagedClusterSetBinding, err error) // Get retrieves the ManagedClusterSetBinding from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ManagedClusterSetBinding, error) + Get(name string) (*v1beta2.ManagedClusterSetBinding, error) ManagedClusterSetBindingNamespaceListerExpansion } @@ -63,21 +63,21 @@ type managedClusterSetBindingNamespaceLister struct { } // List lists all ManagedClusterSetBindings in the indexer for a given namespace. -func (s managedClusterSetBindingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ManagedClusterSetBinding, err error) { +func (s managedClusterSetBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ManagedClusterSetBinding, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ManagedClusterSetBinding)) + ret = append(ret, m.(*v1beta2.ManagedClusterSetBinding)) }) return ret, err } // Get retrieves the ManagedClusterSetBinding from the indexer for a given namespace and name. 
-func (s managedClusterSetBindingNamespaceLister) Get(name string) (*v1alpha1.ManagedClusterSetBinding, error) { +func (s managedClusterSetBindingNamespaceLister) Get(name string) (*v1beta2.ManagedClusterSetBinding, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("managedclustersetbinding"), name) + return nil, errors.NewNotFound(v1beta2.Resource("managedclustersetbinding"), name) } - return obj.(*v1alpha1.ManagedClusterSetBinding), nil + return obj.(*v1beta2.ManagedClusterSetBinding), nil } diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml deleted file mode 100644 index 402ac242e..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml +++ /dev/null @@ -1,92 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: managedclustersets.cluster.open-cluster-management.io -spec: - group: cluster.open-cluster-management.io - names: - kind: ManagedClusterSet - listKind: ManagedClusterSetList - plural: managedclustersets - shortNames: - - mclset - - mclsets - singular: managedclusterset - scope: Cluster - preserveUnknownFields: false - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean: 1. The workload can run on any ManagedCluster in the ManagedClusterSet 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet 3. 
The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission on both the old and new ManagedClusterSet." - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of the ManagedClusterSet - type: object - status: - description: Status represents the current status of the ManagedClusterSet - type: object - properties: - conditions: - description: Conditions contains the different condition statuses for this ManagedClusterSet. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml deleted file mode 100644 index 5e0f47d17..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: managedclustersetbindings.cluster.open-cluster-management.io -spec: - group: cluster.open-cluster-management.io - names: - kind: ManagedClusterSetBinding - listKind: ManagedClusterSetBindingList - plural: managedclustersetbindings - shortNames: - - mclsetbinding - - mclsetbindings - singular: managedclustersetbinding - scope: Namespaced - preserveUnknownFields: false - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. 
Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers. - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of ManagedClusterSetBinding. - type: object - properties: - clusterSet: - description: ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. 
- type: string - minLength: 1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_03_clusters.open-cluster-management.io_placements.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_03_clusters.open-cluster-management.io_placements.crd.yaml deleted file mode 100644 index c8c53f4ab..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_03_clusters.open-cluster-management.io_placements.crd.yaml +++ /dev/null @@ -1,265 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: placements.cluster.open-cluster-management.io -spec: - group: cluster.open-cluster-management.io - names: - kind: Placement - listKind: PlacementList - plural: placements - singular: placement - scope: Namespaced - preserveUnknownFields: false - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].status - name: Succeeded - type: string - - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].reason - name: Reason - type: string - - jsonPath: .status.numberOfSelectedClusters - name: SelectedClusters - type: integer - name: v1alpha1 - schema: - openAPIV3Schema: - description: "Placement defines a rule to select a set of ManagedClusters from the ManagedClusterSets bound to the placement namespace. 
\n Here is how the placement policy combines with other selection methods to determine a matching list of ManagedClusters: 1) Kubernetes clusters are registered with hub as cluster-scoped ManagedClusters; 2) ManagedClusters are organized into cluster-scoped ManagedClusterSets; 3) ManagedClusterSets are bound to workload namespaces; 4) Namespace-scoped Placements specify a slice of ManagedClusterSets which select a working set of potential ManagedClusters; 5) Then Placements subselect from that working set using label/claim selection. \n No ManagedCluster will be selected if no ManagedClusterSet is bound to the placement namespace. User is able to bind a ManagedClusterSet to a namespace by creating a ManagedClusterSetBinding in that namespace if they have a RBAC rule to CREATE on the virtual subresource of `managedclustersets/bind`. \n A slice of PlacementDecisions with label cluster.open-cluster-management.io/placement={placement name} will be created to represent the ManagedClusters selected by this placement. \n If a ManagedCluster is selected and added into the PlacementDecisions, other components may apply workload on it; once it is removed from the PlacementDecisions, the workload applied on this ManagedCluster should be evicted accordingly." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of Placement. - type: object - properties: - clusterSets: - description: ClusterSets represent the ManagedClusterSets from which the ManagedClusters are selected. If the slice is empty, ManagedClusters will be selected from the ManagedClusterSets bound to the placement namespace, otherwise ManagedClusters will be selected from the intersection of this slice and the ManagedClusterSets bound to the placement namespace. - type: array - items: - type: string - numberOfClusters: - description: NumberOfClusters represents the desired number of ManagedClusters to be selected which meet the placement requirements. 1) If not specified, all ManagedClusters which meet the placement requirements (including ClusterSets, and Predicates) will be selected; 2) Otherwise if the nubmer of ManagedClusters meet the placement requirements is larger than NumberOfClusters, a random subset with desired number of ManagedClusters will be selected; 3) If the nubmer of ManagedClusters meet the placement requirements is equal to NumberOfClusters, all of them will be selected; 4) If the nubmer of ManagedClusters meet the placement requirements is less than NumberOfClusters, all of them will be selected, and the status of condition `PlacementConditionSatisfied` will be set to false; - type: integer - format: int32 - predicates: - description: Predicates represent a slice of predicates to select ManagedClusters. The predicates are ORed. - type: array - items: - description: ClusterPredicate represents a predicate to select ManagedClusters. - type: object - properties: - requiredClusterSelector: - description: RequiredClusterSelector represents a selector of ManagedClusters by label and claim. 
If specified, 1) Any ManagedCluster, which does not match the selector, should not be selected by this ClusterPredicate; 2) If a selected ManagedCluster (of this ClusterPredicate) ceases to match the selector (e.g. due to an update) of any ClusterPredicate, it will be eventually removed from the placement decisions; 3) If a ManagedCluster (not selected previously) starts to match the selector, it will either be selected or at least has a chance to be selected (when NumberOfClusters is specified); - type: object - properties: - claimSelector: - description: ClaimSelector represents a selector of ManagedClusters by clusterClaims in status - type: object - properties: - matchExpressions: - description: matchExpressions is a list of cluster claim selector requirements. The requirements are ANDed. - type: array - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - type: object - required: - - key - - operator - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - type: array - items: - type: string - labelSelector: - description: LabelSelector represents a selector of ManagedClusters by label - type: object - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - type: array - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
- type: object - required: - - key - - operator - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - type: array - items: - type: string - matchLabels: - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - additionalProperties: - type: string - prioritizerPolicy: - description: PrioritizerPolicy defines the policy of the prioritizers. If this field is unset, then default prioritizer mode and configurations are used. Referring to PrioritizerPolicy to see more description about Mode and Configurations. - type: object - properties: - configurations: - type: array - items: - description: PrioritizerConfig represents the configuration of prioritizer - type: object - properties: - name: - description: 'Name will be removed in v1beta1 and replaced by ScoreCoordinate.BuiltIn. If both Name and ScoreCoordinate.BuiltIn are defined, will use the value in ScoreCoordinate.BuiltIn. Name is the name of a prioritizer. Below are the valid names: 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable.' - type: string - scoreCoordinate: - description: ScoreCoordinate represents the configuration of the prioritizer and score source. 
- type: object - required: - - type - properties: - addOn: - description: When type is "AddOn", AddOn defines the resource name and score name. - type: object - required: - - resourceName - - scoreName - properties: - resourceName: - description: ResourceName defines the resource name of the AddOnPlacementScore. The placement prioritizer selects AddOnPlacementScore CR by this name. - type: string - scoreName: - description: ScoreName defines the score name inside AddOnPlacementScore. AddOnPlacementScore contains a list of score name and score value, ScoreName specify the score to be used by the prioritizer. - type: string - builtIn: - description: 'BuiltIn defines the name of a BuiltIn prioritizer. Below are the valid BuiltIn prioritizer names. 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable.' - type: string - type: - description: Type defines the type of the prioritizer score. Type is either "BuiltIn", "AddOn" or "", where "" is "BuiltIn" by default. When the type is "BuiltIn", need to specify a BuiltIn prioritizer name in BuiltIn. When the type is "AddOn", need to configure the score source in AddOn. - type: string - default: BuiltIn - enum: - - BuiltIn - - AddOn - weight: - description: Weight defines the weight of the prioritizer score. The value must be ranged in [-10,10]. Each prioritizer will calculate an integer score of a cluster in the range of [-100, 100]. The final score of a cluster will be sum(weight * prioritizer_score). A higher weight indicates that the prioritizer weights more in the cluster selection, while 0 weight indicates that the prioritizer is disabled. A negative weight indicates wants to select the last ones. - type: integer - format: int32 - default: 1 - maximum: 10 - minimum: -10 - mode: - description: Mode is either Exact, Additive, "" where "" is Additive by default. 
In Additive mode, any prioritizer not explicitly enumerated is enabled in its default Configurations, in which Steady and Balance prioritizers have the weight of 1 while other prioritizers have the weight of 0. Additive doesn't require configuring all prioritizers. The default Configurations may change in the future, and additional prioritization will happen. In Exact mode, any prioritizer not explicitly enumerated is weighted as zero. Exact requires knowing the full set of prioritizers you want, but avoids behavior changes between releases. - type: string - default: Additive - tolerations: - description: Tolerations are applied to placements, and allow (but do not require) the managed clusters with certain taints to be selected by placements with matching tolerations. - type: array - items: - description: Toleration represents the toleration object that can be attached to a placement. The placement this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - type: object - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSelect, PreferNoSelect and NoSelectIfNew. - type: string - enum: - - NoSelect - - PreferNoSelect - - NoSelectIfNew - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a placement can tolerate all taints of a particular category. 
- type: string - default: Equal - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoSelect/PreferNoSelect, otherwise this field is ignored) tolerates the taint. The default value is nil, which indicates it tolerates the taint forever. The start time of counting the TolerationSeconds should be the TimeAdded in Taint, not the cluster scheduled time or TolerationSeconds added time. - type: integer - format: int64 - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - maxLength: 1024 - status: - description: Status represents the current status of the Placement - type: object - properties: - conditions: - description: Conditions contains the different condition status for this Placement. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
- type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - numberOfSelectedClusters: - description: NumberOfSelectedClusters represents the number of selected ManagedClusters - type: integer - format: int32 - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml deleted file mode 100644 index 9cab8eb6d..000000000 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: placementdecisions.cluster.open-cluster-management.io -spec: - group: cluster.open-cluster-management.io - names: - kind: PlacementDecision - listKind: PlacementDecisionList - plural: placementdecisions - singular: placementdecision - scope: Namespaced - preserveUnknownFields: false - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "PlacementDecision indicates a decision from a placement PlacementDecision should has a label cluster.open-cluster-management.io/placement={placement name} to reference a certain placement. \n If a placement has spec.numberOfClusters specified, the total number of decisions contained in status.decisions of PlacementDecisions should always be NumberOfClusters; otherwise, the total number of decisions should be the number of ManagedClusters which match the placement requirements. 
\n Some of the decisions might be empty when there are no enough ManagedClusters meet the placement requirements." - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - status: - description: Status represents the current status of the PlacementDecision - type: object - required: - - decisions - properties: - decisions: - description: Decisions is a slice of decisions according to a placement The number of decisions should not be larger than 100 - type: array - items: - description: ClusterDecision represents a decision from a placement An empty ClusterDecision indicates it is not scheduled yet. - type: object - required: - - clusterName - - reason - properties: - clusterName: - description: ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all placement decisions for the Placement. - type: string - reason: - description: Reason represents the reason why the ManagedCluster is selected. 
- type: string - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/register.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/register.go index 480f4883f..36b73ce2f 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/register.go +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/register.go @@ -32,16 +32,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, &AddOnPlacementScore{}, &AddOnPlacementScoreList{}, - &ManagedClusterSet{}, - &ManagedClusterSetList{}, - &ManagedClusterSetBinding{}, - &ManagedClusterSetBindingList{}, &ClusterClaim{}, &ClusterClaimList{}, - &Placement{}, - &PlacementList{}, - &PlacementDecision{}, - &PlacementDecisionList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/types.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/types.go index 4be2d776c..044127d20 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/types.go +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/types.go @@ -2,110 +2,8 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "open-cluster-management.io/api/cluster/v1" ) -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope="Cluster",shortName={"mclset","mclsets"} - -// ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. -// A workload can be defined to deployed on a ManagedClusterSet, which mean: -// 1. The workload can run on any ManagedCluster in the ManagedClusterSet -// 2. 
The workload cannot run on any ManagedCluster outside the ManagedClusterSet -// 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet -// -// In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name -// `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. -// User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on -// a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission -// on both the old and new ManagedClusterSet. -type ManagedClusterSet struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the attributes of the ManagedClusterSet - Spec ManagedClusterSetSpec `json:"spec"` - - // Status represents the current status of the ManagedClusterSet - // +optional - Status ManagedClusterSetStatus `json:"status,omitempty"` -} - -// ManagedClusterSetSpec describes the attributes of the ManagedClusterSet -type ManagedClusterSetSpec struct { -} - -// ManagedClusterSetStatus represents the current status of the ManagedClusterSet. -type ManagedClusterSetStatus struct { - // Conditions contains the different condition statuses for this ManagedClusterSet. - Conditions []metav1.Condition `json:"conditions"` -} - -const ( - // ManagedClusterSetConditionEmpty means no ManagedCluster is included in the - // ManagedClusterSet. - ManagedClusterSetConditionEmpty string = "ClusterSetEmpty" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ManagedClusterSetList is a collection of ManagedClusterSet. -type ManagedClusterSetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // Items is a list of ManagedClusterSet. - Items []ManagedClusterSet `json:"items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:resource:scope="Namespaced",shortName={"mclsetbinding","mclsetbindings"} - -// ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. -// User is able to create a ManagedClusterSetBinding in a namespace and bind it to a -// ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of -// managedclustersets/bind. Workloads created in the same namespace can only be -// distributed to ManagedClusters in ManagedClusterSets bound in this namespace by -// higher level controllers. -type ManagedClusterSetBinding struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the attributes of ManagedClusterSetBinding. - Spec ManagedClusterSetBindingSpec `json:"spec"` -} - -// ManagedClusterSetBindingSpec defines the attributes of ManagedClusterSetBinding. -type ManagedClusterSetBindingSpec struct { - // ClusterSet is the name of the ManagedClusterSet to bind. It must match the - // instance name of the ManagedClusterSetBinding and cannot change once created. - // User is allowed to set this field if they have an RBAC rule to CREATE on the - // virtual subresource of managedclustersets/bind. - // +kubebuilder:validation:MinLength=1 - ClusterSet string `json:"clusterSet"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ManagedClusterSetBindingList is a collection of ManagedClusterSetBinding. -type ManagedClusterSetBindingList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // Items is a list of ManagedClusterSetBinding. - Items []ManagedClusterSetBinding `json:"items"` -} - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -160,367 +58,3 @@ var ReservedClusterClaimNames = [...]string{ // product name, like OpenShift, Anthos, EKS and GKE "product.open-cluster-management.io", } - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:resource:scope="Namespaced" -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Succeeded",type="string",JSONPath=".status.conditions[?(@.type==\"PlacementSatisfied\")].status" -// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type==\"PlacementSatisfied\")].reason" -// +kubebuilder:printcolumn:name="SelectedClusters",type="integer",JSONPath=".status.numberOfSelectedClusters" - -// Placement defines a rule to select a set of ManagedClusters from the ManagedClusterSets bound -// to the placement namespace. -// -// Here is how the placement policy combines with other selection methods to determine a matching -// list of ManagedClusters: -// 1) Kubernetes clusters are registered with hub as cluster-scoped ManagedClusters; -// 2) ManagedClusters are organized into cluster-scoped ManagedClusterSets; -// 3) ManagedClusterSets are bound to workload namespaces; -// 4) Namespace-scoped Placements specify a slice of ManagedClusterSets which select a working set -// of potential ManagedClusters; -// 5) Then Placements subselect from that working set using label/claim selection. -// -// No ManagedCluster will be selected if no ManagedClusterSet is bound to the placement -// namespace. 
User is able to bind a ManagedClusterSet to a namespace by creating a -// ManagedClusterSetBinding in that namespace if they have a RBAC rule to CREATE on the virtual -// subresource of `managedclustersets/bind`. -// -// A slice of PlacementDecisions with label cluster.open-cluster-management.io/placement={placement name} -// will be created to represent the ManagedClusters selected by this placement. -// -// If a ManagedCluster is selected and added into the PlacementDecisions, other components may -// apply workload on it; once it is removed from the PlacementDecisions, the workload applied on -// this ManagedCluster should be evicted accordingly. -type Placement struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the attributes of Placement. - // +kubebuilder:validation:Required - // +required - Spec PlacementSpec `json:"spec"` - - // Status represents the current status of the Placement - // +optional - Status PlacementStatus `json:"status,omitempty"` -} - -// PlacementSpec defines the attributes of Placement. -// An empty PlacementSpec selects all ManagedClusters from the ManagedClusterSets bound to -// the placement namespace. The containing fields are ANDed. -type PlacementSpec struct { - // ClusterSets represent the ManagedClusterSets from which the ManagedClusters are selected. - // If the slice is empty, ManagedClusters will be selected from the ManagedClusterSets bound to the placement - // namespace, otherwise ManagedClusters will be selected from the intersection of this slice and the - // ManagedClusterSets bound to the placement namespace. - // +optional - ClusterSets []string `json:"clusterSets,omitempty"` - - // NumberOfClusters represents the desired number of ManagedClusters to be selected which meet the - // placement requirements. 
- // 1) If not specified, all ManagedClusters which meet the placement requirements (including ClusterSets, - // and Predicates) will be selected; - // 2) Otherwise if the nubmer of ManagedClusters meet the placement requirements is larger than - // NumberOfClusters, a random subset with desired number of ManagedClusters will be selected; - // 3) If the nubmer of ManagedClusters meet the placement requirements is equal to NumberOfClusters, - // all of them will be selected; - // 4) If the nubmer of ManagedClusters meet the placement requirements is less than NumberOfClusters, - // all of them will be selected, and the status of condition `PlacementConditionSatisfied` will be - // set to false; - // +optional - NumberOfClusters *int32 `json:"numberOfClusters,omitempty"` - - // Predicates represent a slice of predicates to select ManagedClusters. The predicates are ORed. - // +optional - Predicates []ClusterPredicate `json:"predicates,omitempty"` - - // PrioritizerPolicy defines the policy of the prioritizers. - // If this field is unset, then default prioritizer mode and configurations are used. - // Referring to PrioritizerPolicy to see more description about Mode and Configurations. - // +optional - PrioritizerPolicy PrioritizerPolicy `json:"prioritizerPolicy"` - - // Tolerations are applied to placements, and allow (but do not require) the managed clusters with - // certain taints to be selected by placements with matching tolerations. - // +optional - Tolerations []Toleration `json:"tolerations,omitempty"` -} - -// ClusterPredicate represents a predicate to select ManagedClusters. -type ClusterPredicate struct { - // RequiredClusterSelector represents a selector of ManagedClusters by label and claim. If specified, - // 1) Any ManagedCluster, which does not match the selector, should not be selected by this ClusterPredicate; - // 2) If a selected ManagedCluster (of this ClusterPredicate) ceases to match the selector (e.g. 
due to - // an update) of any ClusterPredicate, it will be eventually removed from the placement decisions; - // 3) If a ManagedCluster (not selected previously) starts to match the selector, it will either - // be selected or at least has a chance to be selected (when NumberOfClusters is specified); - // +optional - RequiredClusterSelector ClusterSelector `json:"requiredClusterSelector,omitempty"` -} - -// ClusterSelector represents the AND of the containing selectors. An empty cluster selector matches all objects. -// A null cluster selector matches no objects. -type ClusterSelector struct { - // LabelSelector represents a selector of ManagedClusters by label - // +optional - LabelSelector metav1.LabelSelector `json:"labelSelector,omitempty"` - - // ClaimSelector represents a selector of ManagedClusters by clusterClaims in status - // +optional - ClaimSelector ClusterClaimSelector `json:"claimSelector,omitempty"` -} - -// ClusterClaimSelector is a claim query over a set of ManagedClusters. An empty cluster claim -// selector matches all objects. A null cluster claim selector matches no objects. -type ClusterClaimSelector struct { - // matchExpressions is a list of cluster claim selector requirements. The requirements are ANDed. - // +optional - MatchExpressions []metav1.LabelSelectorRequirement `json:"matchExpressions,omitempty"` -} - -// PrioritizerPolicy represents the policy of prioritizer -type PrioritizerPolicy struct { - // Mode is either Exact, Additive, "" where "" is Additive by default. - // In Additive mode, any prioritizer not explicitly enumerated is enabled in its default Configurations, - // in which Steady and Balance prioritizers have the weight of 1 while other prioritizers have the weight of 0. - // Additive doesn't require configuring all prioritizers. The default Configurations may change in the future, - // and additional prioritization will happen. - // In Exact mode, any prioritizer not explicitly enumerated is weighted as zero. 
- // Exact requires knowing the full set of prioritizers you want, but avoids behavior changes between releases. - // +kubebuilder:default:=Additive - // +optional - Mode PrioritizerPolicyModeType `json:"mode,omitempty"` - - // +optional - Configurations []PrioritizerConfig `json:"configurations,omitempty"` -} - -// PrioritizerPolicyModeType represents the type of PrioritizerPolicy.Mode -type PrioritizerPolicyModeType string - -const ( - // Valid PrioritizerPolicyModeType value is Exact, Additive. - PrioritizerPolicyModeAdditive PrioritizerPolicyModeType = "Additive" - PrioritizerPolicyModeExact PrioritizerPolicyModeType = "Exact" -) - -// PrioritizerConfig represents the configuration of prioritizer -type PrioritizerConfig struct { - // Name will be removed in v1beta1 and replaced by ScoreCoordinate.BuiltIn. - // If both Name and ScoreCoordinate.BuiltIn are defined, will use the value - // in ScoreCoordinate.BuiltIn. - // Name is the name of a prioritizer. Below are the valid names: - // 1) Balance: balance the decisions among the clusters. - // 2) Steady: ensure the existing decision is stabilized. - // 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable. - // +optional - Name string `json:"name,omitempty"` - - // ScoreCoordinate represents the configuration of the prioritizer and score source. - // +optional - ScoreCoordinate *ScoreCoordinate `json:"scoreCoordinate,omitempty"` - - // Weight defines the weight of the prioritizer score. The value must be ranged in [-10,10]. - // Each prioritizer will calculate an integer score of a cluster in the range of [-100, 100]. - // The final score of a cluster will be sum(weight * prioritizer_score). - // A higher weight indicates that the prioritizer weights more in the cluster selection, - // while 0 weight indicates that the prioritizer is disabled. A negative weight indicates - // wants to select the last ones. 
- // +kubebuilder:validation:Minimum:=-10 - // +kubebuilder:validation:Maximum:=10 - // +kubebuilder:default:=1 - // +optional - Weight int32 `json:"weight,omitempty"` -} - -// ScoreCoordinate represents the configuration of the score type and score source -type ScoreCoordinate struct { - // Type defines the type of the prioritizer score. - // Type is either "BuiltIn", "AddOn" or "", where "" is "BuiltIn" by default. - // When the type is "BuiltIn", need to specify a BuiltIn prioritizer name in BuiltIn. - // When the type is "AddOn", need to configure the score source in AddOn. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Enum=BuiltIn;AddOn - // +kubebuilder:default:=BuiltIn - // +required - Type string `json:"type,omitempty"` - - // BuiltIn defines the name of a BuiltIn prioritizer. Below are the valid BuiltIn prioritizer names. - // 1) Balance: balance the decisions among the clusters. - // 2) Steady: ensure the existing decision is stabilized. - // 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable. - // +optional - BuiltIn string `json:"builtIn,omitempty"` - - // When type is "AddOn", AddOn defines the resource name and score name. - // +optional - AddOn *AddOnScore `json:"addOn,omitempty"` -} - -const ( - // Valid ScoreCoordinate type is BuiltIn, AddOn. - ScoreCoordinateTypeBuiltIn string = "BuiltIn" - ScoreCoordinateTypeAddOn string = "AddOn" -) - -// AddOnScore represents the configuration of the addon score source. -type AddOnScore struct { - // ResourceName defines the resource name of the AddOnPlacementScore. - // The placement prioritizer selects AddOnPlacementScore CR by this name. - // +kubebuilder:validation:Required - // +required - ResourceName string `json:"resourceName"` - - // ScoreName defines the score name inside AddOnPlacementScore. - // AddOnPlacementScore contains a list of score name and score value, ScoreName specify the score to be used by - // the prioritizer. 
- // +kubebuilder:validation:Required - // +required - ScoreName string `json:"scoreName"` -} - -// Toleration represents the toleration object that can be attached to a placement. -// The placement this Toleration is attached to tolerates any taint that matches -// the triple using the matching operator . -type Toleration struct { - // Key is the taint key that the toleration applies to. Empty means match all taint keys. - // If the key is empty, operator must be Exists; this combination means to match all values and all keys. - // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` - // +kubebuilder:validation:MaxLength=316 - // +optional - Key string `json:"key,omitempty"` - // Operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a placement can - // tolerate all taints of a particular category. - // +kubebuilder:default:="Equal" - // +optional - Operator TolerationOperator `json:"operator,omitempty"` - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - // +kubebuilder:validation:MaxLength=1024 - // +optional - Value string `json:"value,omitempty"` - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSelect, PreferNoSelect and NoSelectIfNew. - // +kubebuilder:validation:Enum:=NoSelect;PreferNoSelect;NoSelectIfNew - // +optional - Effect v1.TaintEffect `json:"effect,omitempty"` - // TolerationSeconds represents the period of time the toleration (which must be of effect - // NoSelect/PreferNoSelect, otherwise this field is ignored) tolerates the taint. - // The default value is nil, which indicates it tolerates the taint forever. 
- // The start time of counting the TolerationSeconds should be the TimeAdded in Taint, not the cluster - // scheduled time or TolerationSeconds added time. - // +optional - TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` -} - -// TolerationOperator is the set of operators that can be used in a toleration. -type TolerationOperator string - -// These are valid values for TolerationOperator -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -type PlacementStatus struct { - // NumberOfSelectedClusters represents the number of selected ManagedClusters - // +optional - NumberOfSelectedClusters int32 `json:"numberOfSelectedClusters"` - - // Conditions contains the different condition status for this Placement. - // +optional - Conditions []metav1.Condition `json:"conditions"` -} - -const ( - // PlacementConditionSatisfied means Placement requirements are satisfied. - // A placement is not satisfied only if there is empty ClusterDecision in the status.decisions - // of PlacementDecisions. - PlacementConditionSatisfied string = "PlacementSatisfied" - // PlacementConditionMisconfigured means Placement configuration is incorrect. - PlacementConditionMisconfigured string = "PlacementMisconfigured" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PlacementList is a collection of Placements. -type PlacementList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // Items is a list of Placements. 
- Items []Placement `json:"items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:resource:scope="Namespaced" -// +kubebuilder:subresource:status - -// PlacementDecision indicates a decision from a placement -// PlacementDecision should has a label cluster.open-cluster-management.io/placement={placement name} -// to reference a certain placement. -// -// If a placement has spec.numberOfClusters specified, the total number of decisions contained in -// status.decisions of PlacementDecisions should always be NumberOfClusters; otherwise, the total -// number of decisions should be the number of ManagedClusters which match the placement requirements. -// -// Some of the decisions might be empty when there are no enough ManagedClusters meet the placement -// requirements. -type PlacementDecision struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Status represents the current status of the PlacementDecision - // +optional - Status PlacementDecisionStatus `json:"status,omitempty"` -} - -//The placementDecsion label name holding the placement name -const ( - PlacementLabel string = "cluster.open-cluster-management.io/placement" -) - -// PlacementDecisionStatus represents the current status of the PlacementDecision. -type PlacementDecisionStatus struct { - // Decisions is a slice of decisions according to a placement - // The number of decisions should not be larger than 100 - // +kubebuilder:validation:Required - // +required - Decisions []ClusterDecision `json:"decisions"` -} - -// ClusterDecision represents a decision from a placement -// An empty ClusterDecision indicates it is not scheduled yet. -type ClusterDecision struct { - // ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all - // placement decisions for the Placement. 
- // +kubebuilder:validation:Required - // +required - ClusterName string `json:"clusterName"` - - // Reason represents the reason why the ManagedCluster is selected. - // +kubebuilder:validation:Required - // +required - Reason string `json:"reason"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterDecisionList is a collection of PlacementDecision. -type PlacementDecisionList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // Items is a list of PlacementDecision. - Items []PlacementDecision `json:"items"` -} diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go index 5f41cf881..fd4fa6bee 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go @@ -118,22 +118,6 @@ func (in *AddOnPlacementScoreStatus) DeepCopy() *AddOnPlacementScoreStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AddOnScore) DeepCopyInto(out *AddOnScore) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnScore. -func (in *AddOnScore) DeepCopy() *AddOnScore { - if in == nil { - return nil - } - out := new(AddOnScore) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterClaim) DeepCopyInto(out *ClusterClaim) { *out = *in @@ -194,29 +178,6 @@ func (in *ClusterClaimList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterClaimSelector) DeepCopyInto(out *ClusterClaimSelector) { - *out = *in - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]v1.LabelSelectorRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimSelector. -func (in *ClusterClaimSelector) DeepCopy() *ClusterClaimSelector { - if in == nil { - return nil - } - out := new(ClusterClaimSelector) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClaimSpec) DeepCopyInto(out *ClusterClaimSpec) { *out = *in @@ -232,522 +193,3 @@ func (in *ClusterClaimSpec) DeepCopy() *ClusterClaimSpec { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterDecision) DeepCopyInto(out *ClusterDecision) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDecision. -func (in *ClusterDecision) DeepCopy() *ClusterDecision { - if in == nil { - return nil - } - out := new(ClusterDecision) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterPredicate) DeepCopyInto(out *ClusterPredicate) { - *out = *in - in.RequiredClusterSelector.DeepCopyInto(&out.RequiredClusterSelector) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPredicate. -func (in *ClusterPredicate) DeepCopy() *ClusterPredicate { - if in == nil { - return nil - } - out := new(ClusterPredicate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSelector) DeepCopyInto(out *ClusterSelector) { - *out = *in - in.LabelSelector.DeepCopyInto(&out.LabelSelector) - in.ClaimSelector.DeepCopyInto(&out.ClaimSelector) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSelector. -func (in *ClusterSelector) DeepCopy() *ClusterSelector { - if in == nil { - return nil - } - out := new(ClusterSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSet) DeepCopyInto(out *ManagedClusterSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSet. -func (in *ManagedClusterSet) DeepCopy() *ManagedClusterSet { - if in == nil { - return nil - } - out := new(ManagedClusterSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ManagedClusterSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ManagedClusterSetBinding) DeepCopyInto(out *ManagedClusterSetBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBinding. -func (in *ManagedClusterSetBinding) DeepCopy() *ManagedClusterSetBinding { - if in == nil { - return nil - } - out := new(ManagedClusterSetBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ManagedClusterSetBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetBindingList) DeepCopyInto(out *ManagedClusterSetBindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ManagedClusterSetBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingList. -func (in *ManagedClusterSetBindingList) DeepCopy() *ManagedClusterSetBindingList { - if in == nil { - return nil - } - out := new(ManagedClusterSetBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ManagedClusterSetBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ManagedClusterSetBindingSpec) DeepCopyInto(out *ManagedClusterSetBindingSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingSpec. -func (in *ManagedClusterSetBindingSpec) DeepCopy() *ManagedClusterSetBindingSpec { - if in == nil { - return nil - } - out := new(ManagedClusterSetBindingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetList) DeepCopyInto(out *ManagedClusterSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ManagedClusterSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetList. -func (in *ManagedClusterSetList) DeepCopy() *ManagedClusterSetList { - if in == nil { - return nil - } - out := new(ManagedClusterSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ManagedClusterSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetSpec) DeepCopyInto(out *ManagedClusterSetSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetSpec. 
-func (in *ManagedClusterSetSpec) DeepCopy() *ManagedClusterSetSpec { - if in == nil { - return nil - } - out := new(ManagedClusterSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetStatus) DeepCopyInto(out *ManagedClusterSetStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetStatus. -func (in *ManagedClusterSetStatus) DeepCopy() *ManagedClusterSetStatus { - if in == nil { - return nil - } - out := new(ManagedClusterSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Placement) DeepCopyInto(out *Placement) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. -func (in *Placement) DeepCopy() *Placement { - if in == nil { - return nil - } - out := new(Placement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Placement) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PlacementDecision) DeepCopyInto(out *PlacementDecision) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementDecision. -func (in *PlacementDecision) DeepCopy() *PlacementDecision { - if in == nil { - return nil - } - out := new(PlacementDecision) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PlacementDecision) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PlacementDecisionList) DeepCopyInto(out *PlacementDecisionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PlacementDecision, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementDecisionList. -func (in *PlacementDecisionList) DeepCopy() *PlacementDecisionList { - if in == nil { - return nil - } - out := new(PlacementDecisionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PlacementDecisionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PlacementDecisionStatus) DeepCopyInto(out *PlacementDecisionStatus) { - *out = *in - if in.Decisions != nil { - in, out := &in.Decisions, &out.Decisions - *out = make([]ClusterDecision, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementDecisionStatus. -func (in *PlacementDecisionStatus) DeepCopy() *PlacementDecisionStatus { - if in == nil { - return nil - } - out := new(PlacementDecisionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PlacementList) DeepCopyInto(out *PlacementList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Placement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementList. -func (in *PlacementList) DeepCopy() *PlacementList { - if in == nil { - return nil - } - out := new(PlacementList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PlacementList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PlacementSpec) DeepCopyInto(out *PlacementSpec) { - *out = *in - if in.ClusterSets != nil { - in, out := &in.ClusterSets, &out.ClusterSets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NumberOfClusters != nil { - in, out := &in.NumberOfClusters, &out.NumberOfClusters - *out = new(int32) - **out = **in - } - if in.Predicates != nil { - in, out := &in.Predicates, &out.Predicates - *out = make([]ClusterPredicate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.PrioritizerPolicy.DeepCopyInto(&out.PrioritizerPolicy) - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementSpec. -func (in *PlacementSpec) DeepCopy() *PlacementSpec { - if in == nil { - return nil - } - out := new(PlacementSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PlacementStatus) DeepCopyInto(out *PlacementStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStatus. -func (in *PlacementStatus) DeepCopy() *PlacementStatus { - if in == nil { - return nil - } - out := new(PlacementStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PrioritizerConfig) DeepCopyInto(out *PrioritizerConfig) { - *out = *in - if in.ScoreCoordinate != nil { - in, out := &in.ScoreCoordinate, &out.ScoreCoordinate - *out = new(ScoreCoordinate) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrioritizerConfig. -func (in *PrioritizerConfig) DeepCopy() *PrioritizerConfig { - if in == nil { - return nil - } - out := new(PrioritizerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrioritizerPolicy) DeepCopyInto(out *PrioritizerPolicy) { - *out = *in - if in.Configurations != nil { - in, out := &in.Configurations, &out.Configurations - *out = make([]PrioritizerConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrioritizerPolicy. -func (in *PrioritizerPolicy) DeepCopy() *PrioritizerPolicy { - if in == nil { - return nil - } - out := new(PrioritizerPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScoreCoordinate) DeepCopyInto(out *ScoreCoordinate) { - *out = *in - if in.AddOn != nil { - in, out := &in.AddOn, &out.AddOn - *out = new(AddOnScore) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoreCoordinate. -func (in *ScoreCoordinate) DeepCopy() *ScoreCoordinate { - if in == nil { - return nil - } - out := new(ScoreCoordinate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Toleration) DeepCopyInto(out *Toleration) { - *out = *in - if in.TolerationSeconds != nil { - in, out := &in.TolerationSeconds, &out.TolerationSeconds - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. -func (in *Toleration) DeepCopy() *Toleration { - if in == nil { - return nil - } - out := new(Toleration) - in.DeepCopyInto(out) - return out -} diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go index bbf170962..f1c81946d 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go @@ -11,16 +11,6 @@ package v1alpha1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_AddOnScore = map[string]string{ - "": "AddOnScore represents the configuration of the addon score source.", - "resourceName": "ResourceName defines the resource name of the AddOnPlacementScore. The placement prioritizer selects AddOnPlacementScore CR by this name.", - "scoreName": "ScoreName defines the score name inside AddOnPlacementScore. AddOnPlacementScore contains a list of score name and score value, ScoreName specify the score to be used by the prioritizer.", -} - -func (AddOnScore) SwaggerDoc() map[string]string { - return map_AddOnScore -} - var map_ClusterClaim = map[string]string{ "": "ClusterClaim represents cluster information that a managed cluster claims ClusterClaims with well known names include,\n 1. id.k8s.io, it contains a unique identifier for the cluster.\n 2. 
clusterset.k8s.io, it contains an identifier that relates the cluster\n to the ClusterSet in which it belongs.\nClusterClaims created on a managed cluster will be collected and saved into the status of the corresponding ManagedCluster on hub.", "spec": "Spec defines the attributes of the ClusterClaim.", @@ -40,15 +30,6 @@ func (ClusterClaimList) SwaggerDoc() map[string]string { return map_ClusterClaimList } -var map_ClusterClaimSelector = map[string]string{ - "": "ClusterClaimSelector is a claim query over a set of ManagedClusters. An empty cluster claim selector matches all objects. A null cluster claim selector matches no objects.", - "matchExpressions": "matchExpressions is a list of cluster claim selector requirements. The requirements are ANDed.", -} - -func (ClusterClaimSelector) SwaggerDoc() map[string]string { - return map_ClusterClaimSelector -} - var map_ClusterClaimSpec = map[string]string{ "value": "Value is a claim-dependent string", } @@ -57,214 +38,6 @@ func (ClusterClaimSpec) SwaggerDoc() map[string]string { return map_ClusterClaimSpec } -var map_ClusterDecision = map[string]string{ - "": "ClusterDecision represents a decision from a placement An empty ClusterDecision indicates it is not scheduled yet.", - "clusterName": "ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all placement decisions for the Placement.", - "reason": "Reason represents the reason why the ManagedCluster is selected.", -} - -func (ClusterDecision) SwaggerDoc() map[string]string { - return map_ClusterDecision -} - -var map_ClusterPredicate = map[string]string{ - "": "ClusterPredicate represents a predicate to select ManagedClusters.", - "requiredClusterSelector": "RequiredClusterSelector represents a selector of ManagedClusters by label and claim. 
If specified, 1) Any ManagedCluster, which does not match the selector, should not be selected by this ClusterPredicate; 2) If a selected ManagedCluster (of this ClusterPredicate) ceases to match the selector (e.g. due to\n an update) of any ClusterPredicate, it will be eventually removed from the placement decisions;\n3) If a ManagedCluster (not selected previously) starts to match the selector, it will either\n be selected or at least has a chance to be selected (when NumberOfClusters is specified);", -} - -func (ClusterPredicate) SwaggerDoc() map[string]string { - return map_ClusterPredicate -} - -var map_ClusterSelector = map[string]string{ - "": "ClusterSelector represents the AND of the containing selectors. An empty cluster selector matches all objects. A null cluster selector matches no objects.", - "labelSelector": "LabelSelector represents a selector of ManagedClusters by label", - "claimSelector": "ClaimSelector represents a selector of ManagedClusters by clusterClaims in status", -} - -func (ClusterSelector) SwaggerDoc() map[string]string { - return map_ClusterSelector -} - -var map_ManagedClusterSet = map[string]string{ - "": "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean:\n 1. The workload can run on any ManagedCluster in the ManagedClusterSet\n 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet\n 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet\n\nIn order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. 
In order to update this label, user must have the permission on both the old and new ManagedClusterSet.", - "spec": "Spec defines the attributes of the ManagedClusterSet", - "status": "Status represents the current status of the ManagedClusterSet", -} - -func (ManagedClusterSet) SwaggerDoc() map[string]string { - return map_ManagedClusterSet -} - -var map_ManagedClusterSetBinding = map[string]string{ - "": "ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers.", - "spec": "Spec defines the attributes of ManagedClusterSetBinding.", -} - -func (ManagedClusterSetBinding) SwaggerDoc() map[string]string { - return map_ManagedClusterSetBinding -} - -var map_ManagedClusterSetBindingList = map[string]string{ - "": "ManagedClusterSetBindingList is a collection of ManagedClusterSetBinding.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of ManagedClusterSetBinding.", -} - -func (ManagedClusterSetBindingList) SwaggerDoc() map[string]string { - return map_ManagedClusterSetBindingList -} - -var map_ManagedClusterSetBindingSpec = map[string]string{ - "": "ManagedClusterSetBindingSpec defines the attributes of ManagedClusterSetBinding.", - "clusterSet": "ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. 
User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind.", -} - -func (ManagedClusterSetBindingSpec) SwaggerDoc() map[string]string { - return map_ManagedClusterSetBindingSpec -} - -var map_ManagedClusterSetList = map[string]string{ - "": "ManagedClusterSetList is a collection of ManagedClusterSet.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of ManagedClusterSet.", -} - -func (ManagedClusterSetList) SwaggerDoc() map[string]string { - return map_ManagedClusterSetList -} - -var map_ManagedClusterSetSpec = map[string]string{ - "": "ManagedClusterSetSpec describes the attributes of the ManagedClusterSet", -} - -func (ManagedClusterSetSpec) SwaggerDoc() map[string]string { - return map_ManagedClusterSetSpec -} - -var map_ManagedClusterSetStatus = map[string]string{ - "": "ManagedClusterSetStatus represents the current status of the ManagedClusterSet.", - "conditions": "Conditions contains the different condition statuses for this ManagedClusterSet.", -} - -func (ManagedClusterSetStatus) SwaggerDoc() map[string]string { - return map_ManagedClusterSetStatus -} - -var map_Placement = map[string]string{ - "": "Placement defines a rule to select a set of ManagedClusters from the ManagedClusterSets bound to the placement namespace.\n\nHere is how the placement policy combines with other selection methods to determine a matching list of ManagedClusters: 1) Kubernetes clusters are registered with hub as cluster-scoped ManagedClusters; 2) ManagedClusters are organized into cluster-scoped ManagedClusterSets; 3) ManagedClusterSets are bound to workload namespaces; 4) Namespace-scoped Placements specify a slice of ManagedClusterSets which select a working set\n of potential ManagedClusters;\n5) Then Placements subselect from that working set using label/claim selection.\n\nNo ManagedCluster will be 
selected if no ManagedClusterSet is bound to the placement namespace. User is able to bind a ManagedClusterSet to a namespace by creating a ManagedClusterSetBinding in that namespace if they have a RBAC rule to CREATE on the virtual subresource of `managedclustersets/bind`.\n\nA slice of PlacementDecisions with label cluster.open-cluster-management.io/placement={placement name} will be created to represent the ManagedClusters selected by this placement.\n\nIf a ManagedCluster is selected and added into the PlacementDecisions, other components may apply workload on it; once it is removed from the PlacementDecisions, the workload applied on this ManagedCluster should be evicted accordingly.", - "spec": "Spec defines the attributes of Placement.", - "status": "Status represents the current status of the Placement", -} - -func (Placement) SwaggerDoc() map[string]string { - return map_Placement -} - -var map_PlacementDecision = map[string]string{ - "": "PlacementDecision indicates a decision from a placement PlacementDecision should has a label cluster.open-cluster-management.io/placement={placement name} to reference a certain placement.\n\nIf a placement has spec.numberOfClusters specified, the total number of decisions contained in status.decisions of PlacementDecisions should always be NumberOfClusters; otherwise, the total number of decisions should be the number of ManagedClusters which match the placement requirements.\n\nSome of the decisions might be empty when there are no enough ManagedClusters meet the placement requirements.", - "status": "Status represents the current status of the PlacementDecision", -} - -func (PlacementDecision) SwaggerDoc() map[string]string { - return map_PlacementDecision -} - -var map_PlacementDecisionList = map[string]string{ - "": "ClusterDecisionList is a collection of PlacementDecision.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of PlacementDecision.", -} - -func (PlacementDecisionList) SwaggerDoc() map[string]string { - return map_PlacementDecisionList -} - -var map_PlacementDecisionStatus = map[string]string{ - "": "PlacementDecisionStatus represents the current status of the PlacementDecision.", - "decisions": "Decisions is a slice of decisions according to a placement The number of decisions should not be larger than 100", -} - -func (PlacementDecisionStatus) SwaggerDoc() map[string]string { - return map_PlacementDecisionStatus -} - -var map_PlacementList = map[string]string{ - "": "PlacementList is a collection of Placements.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of Placements.", -} - -func (PlacementList) SwaggerDoc() map[string]string { - return map_PlacementList -} - -var map_PlacementSpec = map[string]string{ - "": "PlacementSpec defines the attributes of Placement. An empty PlacementSpec selects all ManagedClusters from the ManagedClusterSets bound to the placement namespace. The containing fields are ANDed.", - "clusterSets": "ClusterSets represent the ManagedClusterSets from which the ManagedClusters are selected. If the slice is empty, ManagedClusters will be selected from the ManagedClusterSets bound to the placement namespace, otherwise ManagedClusters will be selected from the intersection of this slice and the ManagedClusterSets bound to the placement namespace.", - "numberOfClusters": "NumberOfClusters represents the desired number of ManagedClusters to be selected which meet the placement requirements. 
1) If not specified, all ManagedClusters which meet the placement requirements (including ClusterSets,\n and Predicates) will be selected;\n2) Otherwise if the nubmer of ManagedClusters meet the placement requirements is larger than\n NumberOfClusters, a random subset with desired number of ManagedClusters will be selected;\n3) If the nubmer of ManagedClusters meet the placement requirements is equal to NumberOfClusters,\n all of them will be selected;\n4) If the nubmer of ManagedClusters meet the placement requirements is less than NumberOfClusters,\n all of them will be selected, and the status of condition `PlacementConditionSatisfied` will be\n set to false;", - "predicates": "Predicates represent a slice of predicates to select ManagedClusters. The predicates are ORed.", - "prioritizerPolicy": "PrioritizerPolicy defines the policy of the prioritizers. If this field is unset, then default prioritizer mode and configurations are used. Referring to PrioritizerPolicy to see more description about Mode and Configurations.", - "tolerations": "Tolerations are applied to placements, and allow (but do not require) the managed clusters with certain taints to be selected by placements with matching tolerations.", -} - -func (PlacementSpec) SwaggerDoc() map[string]string { - return map_PlacementSpec -} - -var map_PlacementStatus = map[string]string{ - "numberOfSelectedClusters": "NumberOfSelectedClusters represents the number of selected ManagedClusters", - "conditions": "Conditions contains the different condition status for this Placement.", -} - -func (PlacementStatus) SwaggerDoc() map[string]string { - return map_PlacementStatus -} - -var map_PrioritizerConfig = map[string]string{ - "": "PrioritizerConfig represents the configuration of prioritizer", - "name": "Name will be removed in v1beta1 and replaced by ScoreCoordinate.BuiltIn. If both Name and ScoreCoordinate.BuiltIn are defined, will use the value in ScoreCoordinate.BuiltIn. Name is the name of a prioritizer. 
Below are the valid names: 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable.", - "scoreCoordinate": "ScoreCoordinate represents the configuration of the prioritizer and score source.", - "weight": "Weight defines the weight of the prioritizer score. The value must be ranged in [-10,10]. Each prioritizer will calculate an integer score of a cluster in the range of [-100, 100]. The final score of a cluster will be sum(weight * prioritizer_score). A higher weight indicates that the prioritizer weights more in the cluster selection, while 0 weight indicates that the prioritizer is disabled. A negative weight indicates wants to select the last ones.", -} - -func (PrioritizerConfig) SwaggerDoc() map[string]string { - return map_PrioritizerConfig -} - -var map_PrioritizerPolicy = map[string]string{ - "": "PrioritizerPolicy represents the policy of prioritizer", - "mode": "Mode is either Exact, Additive, \"\" where \"\" is Additive by default. In Additive mode, any prioritizer not explicitly enumerated is enabled in its default Configurations, in which Steady and Balance prioritizers have the weight of 1 while other prioritizers have the weight of 0. Additive doesn't require configuring all prioritizers. The default Configurations may change in the future, and additional prioritization will happen. In Exact mode, any prioritizer not explicitly enumerated is weighted as zero. Exact requires knowing the full set of prioritizers you want, but avoids behavior changes between releases.", -} - -func (PrioritizerPolicy) SwaggerDoc() map[string]string { - return map_PrioritizerPolicy -} - -var map_ScoreCoordinate = map[string]string{ - "": "ScoreCoordinate represents the configuration of the score type and score source", - "type": "Type defines the type of the prioritizer score. 
Type is either \"BuiltIn\", \"AddOn\" or \"\", where \"\" is \"BuiltIn\" by default. When the type is \"BuiltIn\", need to specify a BuiltIn prioritizer name in BuiltIn. When the type is \"AddOn\", need to configure the score source in AddOn.", - "builtIn": "BuiltIn defines the name of a BuiltIn prioritizer. Below are the valid BuiltIn prioritizer names. 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable.", - "addOn": "When type is \"AddOn\", AddOn defines the resource name and score name.", -} - -func (ScoreCoordinate) SwaggerDoc() map[string]string { - return map_ScoreCoordinate -} - -var map_Toleration = map[string]string{ - "": "Toleration represents the toleration object that can be attached to a placement. The placement this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", - "key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", - "operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a placement can tolerate all taints of a particular category.", - "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", - "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSelect, PreferNoSelect and NoSelectIfNew.", - "tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoSelect/PreferNoSelect, otherwise this field is ignored) tolerates the taint. 
The default value is nil, which indicates it tolerates the taint forever. The start time of counting the TolerationSeconds should be the TimeAdded in Taint, not the cluster scheduled time or TolerationSeconds added time.", -} - -func (Toleration) SwaggerDoc() map[string]string { - return map_Toleration -} - var map_AddOnPlacementScore = map[string]string{ "": "AddOnPlacementScore represents a bundle of scores of one managed cluster, which could be used by placement. AddOnPlacementScore is a namespace scoped resource. The namespace of the resource is the cluster namespace.", "status": "Status represents the status of the AddOnPlacementScore.", diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml index a898cc89c..a543045ff 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml @@ -15,77 +15,6 @@ spec: scope: Cluster preserveUnknownFields: false versions: - - name: v1alpha1 - deprecated: true - deprecationWarning: "cluster.open-cluster-management.io/v1alpha1 ManagedClusterSet is deprecated; use cluster.open-cluster-management.io/v1beta1 ManagedClusterSet" - schema: - openAPIV3Schema: - description: "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean: 1. The workload can run on any ManagedCluster in the ManagedClusterSet 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet 3. 
The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission on both the old and new ManagedClusterSet." - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of the ManagedClusterSet - type: object - status: - description: Status represents the current status of the ManagedClusterSet - type: object - properties: - conditions: - description: Conditions contains the different condition statuses for this ManagedClusterSet. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status name: Empty diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml index 86f1c49dd..7a2fc0eb5 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml @@ -15,32 +15,6 @@ spec: scope: Namespaced preserveUnknownFields: false versions: - - name: v1alpha1 - deprecated: true - deprecationWarning: "cluster.open-cluster-management.io/v1alpha1 ManagedClusterSetBinding is deprecated; use cluster.open-cluster-management.io/v1beta1 ManagedClusterSetBinding" - schema: - openAPIV3Schema: - description: ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. 
Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers. - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of ManagedClusterSetBinding. - type: object - properties: - clusterSet: - description: ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. 
- type: string - minLength: 1 - served: true - storage: false - name: v1beta1 schema: openAPIV3Schema: diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml index 639d34cc5..4c22187b6 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml @@ -12,253 +12,6 @@ spec: scope: Namespaced preserveUnknownFields: false versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].status - name: Succeeded - type: string - - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].reason - name: Reason - type: string - - jsonPath: .status.numberOfSelectedClusters - name: SelectedClusters - type: integer - name: v1alpha1 - deprecated: true - deprecationWarning: "cluster.open-cluster-management.io/v1alpha1 Placement is deprecated; use cluster.open-cluster-management.io/v1beta1 Placement" - schema: - openAPIV3Schema: - description: "Placement defines a rule to select a set of ManagedClusters from the ManagedClusterSets bound to the placement namespace. \n Here is how the placement policy combines with other selection methods to determine a matching list of ManagedClusters: 1) Kubernetes clusters are registered with hub as cluster-scoped ManagedClusters; 2) ManagedClusters are organized into cluster-scoped ManagedClusterSets; 3) ManagedClusterSets are bound to workload namespaces; 4) Namespace-scoped Placements specify a slice of ManagedClusterSets which select a working set of potential ManagedClusters; 5) Then Placements subselect from that working set using label/claim selection. 
\n No ManagedCluster will be selected if no ManagedClusterSet is bound to the placement namespace. User is able to bind a ManagedClusterSet to a namespace by creating a ManagedClusterSetBinding in that namespace if they have a RBAC rule to CREATE on the virtual subresource of `managedclustersets/bind`. \n A slice of PlacementDecisions with label cluster.open-cluster-management.io/placement={placement name} will be created to represent the ManagedClusters selected by this placement. \n If a ManagedCluster is selected and added into the PlacementDecisions, other components may apply workload on it; once it is removed from the PlacementDecisions, the workload applied on this ManagedCluster should be evicted accordingly." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of Placement. - type: object - properties: - clusterSets: - description: ClusterSets represent the ManagedClusterSets from which the ManagedClusters are selected. If the slice is empty, ManagedClusters will be selected from the ManagedClusterSets bound to the placement namespace, otherwise ManagedClusters will be selected from the intersection of this slice and the ManagedClusterSets bound to the placement namespace. 
- type: array - items: - type: string - numberOfClusters: - description: NumberOfClusters represents the desired number of ManagedClusters to be selected which meet the placement requirements. 1) If not specified, all ManagedClusters which meet the placement requirements (including ClusterSets, and Predicates) will be selected; 2) Otherwise if the nubmer of ManagedClusters meet the placement requirements is larger than NumberOfClusters, a random subset with desired number of ManagedClusters will be selected; 3) If the nubmer of ManagedClusters meet the placement requirements is equal to NumberOfClusters, all of them will be selected; 4) If the nubmer of ManagedClusters meet the placement requirements is less than NumberOfClusters, all of them will be selected, and the status of condition `PlacementConditionSatisfied` will be set to false; - type: integer - format: int32 - predicates: - description: Predicates represent a slice of predicates to select ManagedClusters. The predicates are ORed. - type: array - items: - description: ClusterPredicate represents a predicate to select ManagedClusters. - type: object - properties: - requiredClusterSelector: - description: RequiredClusterSelector represents a selector of ManagedClusters by label and claim. If specified, 1) Any ManagedCluster, which does not match the selector, should not be selected by this ClusterPredicate; 2) If a selected ManagedCluster (of this ClusterPredicate) ceases to match the selector (e.g. 
due to an update) of any ClusterPredicate, it will be eventually removed from the placement decisions; 3) If a ManagedCluster (not selected previously) starts to match the selector, it will either be selected or at least has a chance to be selected (when NumberOfClusters is specified); - type: object - properties: - claimSelector: - description: ClaimSelector represents a selector of ManagedClusters by clusterClaims in status - type: object - properties: - matchExpressions: - description: matchExpressions is a list of cluster claim selector requirements. The requirements are ANDed. - type: array - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - type: object - required: - - key - - operator - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - type: array - items: - type: string - labelSelector: - description: LabelSelector represents a selector of ManagedClusters by label - type: object - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - type: array - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - type: object - required: - - key - - operator - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - type: array - items: - type: string - matchLabels: - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - additionalProperties: - type: string - prioritizerPolicy: - description: PrioritizerPolicy defines the policy of the prioritizers. If this field is unset, then default prioritizer mode and configurations are used. Referring to PrioritizerPolicy to see more description about Mode and Configurations. - type: object - properties: - configurations: - type: array - items: - description: PrioritizerConfig represents the configuration of prioritizer - type: object - properties: - name: - description: 'Name will be removed in v1beta1 and replaced by ScoreCoordinate.BuiltIn. If both Name and ScoreCoordinate.BuiltIn are defined, will use the value in ScoreCoordinate.BuiltIn. Name is the name of a prioritizer. Below are the valid names: 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable.' - type: string - scoreCoordinate: - description: ScoreCoordinate represents the configuration of the prioritizer and score source. - type: object - required: - - type - properties: - addOn: - description: When type is "AddOn", AddOn defines the resource name and score name. 
- type: object - required: - - resourceName - - scoreName - properties: - resourceName: - description: ResourceName defines the resource name of the AddOnPlacementScore. The placement prioritizer selects AddOnPlacementScore CR by this name. - type: string - scoreName: - description: ScoreName defines the score name inside AddOnPlacementScore. AddOnPlacementScore contains a list of score name and score value, ScoreName specify the score to be used by the prioritizer. - type: string - builtIn: - description: 'BuiltIn defines the name of a BuiltIn prioritizer. Below are the valid BuiltIn prioritizer names. 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable.' - type: string - type: - description: Type defines the type of the prioritizer score. Type is either "BuiltIn", "AddOn" or "", where "" is "BuiltIn" by default. When the type is "BuiltIn", need to specify a BuiltIn prioritizer name in BuiltIn. When the type is "AddOn", need to configure the score source in AddOn. - type: string - default: BuiltIn - enum: - - BuiltIn - - AddOn - weight: - description: Weight defines the weight of the prioritizer score. The value must be ranged in [-10,10]. Each prioritizer will calculate an integer score of a cluster in the range of [-100, 100]. The final score of a cluster will be sum(weight * prioritizer_score). A higher weight indicates that the prioritizer weights more in the cluster selection, while 0 weight indicates that the prioritizer is disabled. A negative weight indicates wants to select the last ones. - type: integer - format: int32 - default: 1 - maximum: 10 - minimum: -10 - mode: - description: Mode is either Exact, Additive, "" where "" is Additive by default. 
In Additive mode, any prioritizer not explicitly enumerated is enabled in its default Configurations, in which Steady and Balance prioritizers have the weight of 1 while other prioritizers have the weight of 0. Additive doesn't require configuring all prioritizers. The default Configurations may change in the future, and additional prioritization will happen. In Exact mode, any prioritizer not explicitly enumerated is weighted as zero. Exact requires knowing the full set of prioritizers you want, but avoids behavior changes between releases. - type: string - default: Additive - tolerations: - description: Tolerations are applied to placements, and allow (but do not require) the managed clusters with certain taints to be selected by placements with matching tolerations. - type: array - items: - description: Toleration represents the toleration object that can be attached to a placement. The placement this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - type: object - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSelect, PreferNoSelect and NoSelectIfNew. - type: string - enum: - - NoSelect - - PreferNoSelect - - NoSelectIfNew - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a placement can tolerate all taints of a particular category. 
- type: string - default: Equal - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoSelect/PreferNoSelect, otherwise this field is ignored) tolerates the taint. The default value is nil, which indicates it tolerates the taint forever. The start time of counting the TolerationSeconds should be the TimeAdded in Taint, not the cluster scheduled time or TolerationSeconds added time. - type: integer - format: int64 - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - maxLength: 1024 - status: - description: Status represents the current status of the Placement - type: object - properties: - conditions: - description: Conditions contains the different condition status for this Placement. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
- type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - numberOfSelectedClusters: - description: NumberOfSelectedClusters represents the number of selected ManagedClusters - type: integer - format: int32 - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].status name: Succeeded diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml index e6b8a0024..2a7353118 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml @@ -12,48 +12,6 @@ spec: scope: Namespaced preserveUnknownFields: false versions: - - name: v1alpha1 - deprecated: true - deprecationWarning: "cluster.open-cluster-management.io/v1alpha1 PlacementDecision is deprecated; use cluster.open-cluster-management.io/v1beta1 PlacementDecision" - schema: - openAPIV3Schema: - description: "PlacementDecision indicates a decision from a placement PlacementDecision should has a label cluster.open-cluster-management.io/placement={placement name} to reference a certain placement. \n If a placement has spec.numberOfClusters specified, the total number of decisions contained in status.decisions of PlacementDecisions should always be NumberOfClusters; otherwise, the total number of decisions should be the number of ManagedClusters which match the placement requirements. 
\n Some of the decisions might be empty when there are no enough ManagedClusters meet the placement requirements." - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - status: - description: Status represents the current status of the PlacementDecision - type: object - required: - - decisions - properties: - decisions: - description: Decisions is a slice of decisions according to a placement The number of decisions should not be larger than 100 - type: array - items: - description: ClusterDecision represents a decision from a placement An empty ClusterDecision indicates it is not scheduled yet. - type: object - required: - - clusterName - - reason - properties: - clusterName: - description: ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all placement decisions for the Placement. - type: string - reason: - description: Reason represents the reason why the ManagedCluster is selected. 
- type: string - served: true - storage: false - subresources: - status: {} - name: v1beta1 schema: openAPIV3Schema: diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placement.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placement.go index b5c9c8423..b4ec052c3 100644 --- a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placement.go +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placement.go @@ -292,3 +292,10 @@ type PlacementList struct { // Items is a list of Placements. Items []Placement `json:"items"` } + +const ( + // PlacementDisableAnnotation is used to disable scheduling for a placement. + // It is a experimental flag to let placement controller ignore this placement, + // so other placement consumers can chime in. + PlacementDisableAnnotation = "cluster.open-cluster-management.io/experimental-scheduling-disable" +) diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml new file mode 100644 index 000000000..8eeb1233f --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml @@ -0,0 +1,271 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: managedclustersets.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ManagedClusterSet + listKind: ManagedClusterSetList + plural: managedclustersets + shortNames: + - mclset + - mclsets + singular: managedclusterset + scope: Cluster + preserveUnknownFields: false + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status 
+ name: Empty + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + deprecated: true + deprecationWarning: "cluster.open-cluster-management.io/v1beta1 ManagedClusterSet is deprecated; use cluster.open-cluster-management.io/v1beta2 ManagedClusterSet" + served: true + storage: true + subresources: + status: {} + "schema": + "openAPIV3Schema": + description: "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean: 1. The workload can run on any ManagedCluster in the ManagedClusterSet 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission on both the old and new ManagedClusterSet." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of the ManagedClusterSet + type: object + default: + clusterSelector: + selectorType: LegacyClusterSetLabel + properties: + clusterSelector: + description: ClusterSelector represents a selector of ManagedClusters + type: object + default: + selectorType: LegacyClusterSetLabel + properties: + labelSelector: + description: LabelSelector define the general labelSelector which clusterset will use to select target managedClusters + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + selectorType: + description: SelectorType could only be "LegacyClusterSetLabel" or "LabelSelector" "LegacyClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. "LabelSelector" means use labelSelector to select target managedClusters + type: string + default: LegacyClusterSetLabel + enum: + - LegacyClusterSetLabel + - LabelSelector + status: + description: Status represents the current status of the ManagedClusterSet + type: object + properties: + conditions: + description: Conditions contains the different condition statuses for this ManagedClusterSet. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. 
+ type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status + name: Empty + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + served: true + storage: false + subresources: + status: {} + "schema": + "openAPIV3Schema": + description: "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean: 1. 
The workload can run on any ManagedCluster in the ManagedClusterSet 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission on both the old and new ManagedClusterSet." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of the ManagedClusterSet + type: object + default: + clusterSelector: + selectorType: ExclusiveClusterSetLabel + properties: + clusterSelector: + description: ClusterSelector represents a selector of ManagedClusters + type: object + default: + selectorType: ExclusiveClusterSetLabel + properties: + labelSelector: + description: LabelSelector define the general labelSelector which clusterset will use to select target managedClusters + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + selectorType: + description: SelectorType could only be "ExclusiveClusterSetLabel" or "LabelSelector" "ExclusiveClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. "LabelSelector" means use labelSelector to select target managedClusters + type: string + default: ExclusiveClusterSetLabel + enum: + - ExclusiveClusterSetLabel + - LabelSelector + status: + description: Status represents the current status of the ManagedClusterSet + type: object + properties: + conditions: + description: Conditions contains the different condition statuses for this ManagedClusterSet. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. 
+ type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml new file mode 100644 index 000000000..bf35bb406 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml @@ -0,0 +1,173 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: managedclustersetbindings.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ManagedClusterSetBinding + listKind: ManagedClusterSetBindingList + plural: managedclustersetbindings + shortNames: + - mclsetbinding + - mclsetbindings + singular: managedclustersetbinding + scope: Namespaced + preserveUnknownFields: false + versions: + - name: v1beta1 + deprecated: true + deprecationWarning: "cluster.open-cluster-management.io/v1beta1 ManagedClusterSetBinding is deprecated; use cluster.open-cluster-management.io/v1beta2 ManagedClusterSetBinding" + schema: + openAPIV3Schema: + description: ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. 
Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of ManagedClusterSetBinding. + type: object + properties: + clusterSet: + description: ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. + type: string + minLength: 1 + status: + description: Status represents the current status of the ManagedClusterSetBinding + type: object + properties: + conditions: + description: Conditions contains the different condition statuses for this ManagedClusterSetBinding. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: true + subresources: + status: {} + - name: v1beta2 + schema: + openAPIV3Schema: + description: ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of ManagedClusterSetBinding. + type: object + properties: + clusterSet: + description: ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. 
User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. + type: string + minLength: 1 + status: + description: Status represents the current status of the ManagedClusterSetBinding + type: object + properties: + conditions: + description: Conditions contains the different condition statuses for this ManagedClusterSetBinding. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. 
+ type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/doc.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/doc.go new file mode 100644 index 000000000..105f7e834 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/doc.go @@ -0,0 +1,9 @@ +// Package v1beta2 contains API Schema definitions for the cluster v1beta2 API group +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=open-cluster-management.io/api/cluster +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=cluster.open-cluster-management.io +package v1beta2 diff --git 
a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/helpers.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/helpers.go new file mode 100644 index 000000000..25847109d --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/helpers.go @@ -0,0 +1,110 @@ +package v1beta2 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + v1 "open-cluster-management.io/api/cluster/v1" +) + +type ManagedClustersGetter interface { + List(selector labels.Selector) (ret []*v1.ManagedCluster, err error) +} + +type ManagedClusterSetsGetter interface { + List(selector labels.Selector) (ret []*ManagedClusterSet, err error) +} + +type ManagedClusterSetBindingsGetter interface { + List(namespace string, selector labels.Selector) (ret []*ManagedClusterSetBinding, err error) +} + +// GetClustersFromClusterSet return the ManagedClusterSet's managedClusters +func GetClustersFromClusterSet(clusterSet *ManagedClusterSet, + clustersGetter ManagedClustersGetter) ([]*v1.ManagedCluster, error) { + var clusters []*v1.ManagedCluster + + if clusterSet == nil { + return nil, nil + } + + clusterSelector, err := BuildClusterSelector(clusterSet) + if err != nil { + return nil, err + } + if clusterSelector == nil { + return nil, fmt.Errorf("failed to build ClusterSelector with clusterSet: %v", clusterSet) + } + clusters, err = clustersGetter.List(clusterSelector) + if err != nil { + return nil, fmt.Errorf("failed to list ManagedClusters: %w", err) + } + return clusters, nil +} + +// GetClusterSetsOfClusterByCluster return the managedClusterSets of a managedCluster +func GetClusterSetsOfCluster(cluster *v1.ManagedCluster, + clusterSetsGetter ManagedClusterSetsGetter) ([]*ManagedClusterSet, error) { + var returnClusterSets []*ManagedClusterSet + + if cluster == nil { + return nil, nil + } + + allClusterSets, err := 
clusterSetsGetter.List(labels.Everything()) + if err != nil { + return nil, err + } + for _, clusterSet := range allClusterSets { + clusterSelector, err := BuildClusterSelector(clusterSet) + if err != nil { + return nil, err + } + if clusterSelector == nil { + return nil, fmt.Errorf("failed to build ClusterSelector with clusterSet: %v", clusterSet) + } + if clusterSelector.Matches(labels.Set(cluster.Labels)) { + returnClusterSets = append(returnClusterSets, clusterSet) + } + } + return returnClusterSets, nil +} + +func BuildClusterSelector(clusterSet *ManagedClusterSet) (labels.Selector, error) { + if clusterSet == nil { + return nil, nil + } + selectorType := clusterSet.Spec.ClusterSelector.SelectorType + + switch selectorType { + case "", ExclusiveClusterSetLabel: + return labels.SelectorFromSet(labels.Set{ + ClusterSetLabel: clusterSet.Name, + }), nil + case LabelSelector: + return metav1.LabelSelectorAsSelector(clusterSet.Spec.ClusterSelector.LabelSelector) + default: + return nil, fmt.Errorf("selectorType is not right: %s", clusterSet.Spec.ClusterSelector.SelectorType) + } +} + +// GetBoundManagedClusterSetBindings returns all bindings that are bounded to clustersets in the given namespace. 
+func GetBoundManagedClusterSetBindings(namespace string, + clusterSetBindingsGetter ManagedClusterSetBindingsGetter) ([]*ManagedClusterSetBinding, error) { + // get all clusterset bindings under the namespace + bindings, err := clusterSetBindingsGetter.List(namespace, labels.Everything()) + if err != nil { + return nil, err + } + + boundBindings := []*ManagedClusterSetBinding{} + for _, binding := range bindings { + if meta.IsStatusConditionTrue(binding.Status.Conditions, ClusterSetBindingBoundType) { + boundBindings = append(boundBindings, binding) + } + } + + return boundBindings, nil +} diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/register.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/register.go new file mode 100644 index 000000000..ed058a663 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/register.go @@ -0,0 +1,40 @@ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "cluster.open-cluster-management.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ManagedClusterSet{}, + &ManagedClusterSetList{}, + &ManagedClusterSetBinding{}, + &ManagedClusterSetBindingList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/types_managedclusterset.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/types_managedclusterset.go new file mode 100644 index 000000000..ce8e93bc0 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/types_managedclusterset.go @@ -0,0 +1,99 @@ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +//ExclusiveClusterSetLabel LabelKey +const ClusterSetLabel = "cluster.open-cluster-management.io/clusterset" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope="Cluster",shortName={"mclset","mclsets"} +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Empty",type="string",JSONPath=".status.conditions[?(@.type==\"ClusterSetEmpty\")].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. +// A workload can be defined to deployed on a ManagedClusterSet, which mean: +// 1. The workload can run on any ManagedCluster in the ManagedClusterSet +// 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet +// 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet +// +// In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name +// `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. 
+// User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on +// a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission +// on both the old and new ManagedClusterSet. +type ManagedClusterSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the attributes of the ManagedClusterSet + // +kubebuilder:default={clusterSelector: {selectorType: ExclusiveClusterSetLabel}} + Spec ManagedClusterSetSpec `json:"spec"` + + // Status represents the current status of the ManagedClusterSet + // +optional + Status ManagedClusterSetStatus `json:"status,omitempty"` +} + +// ManagedClusterSetSpec describes the attributes of the ManagedClusterSet +type ManagedClusterSetSpec struct { + // ClusterSelector represents a selector of ManagedClusters + // +optional + // +kubebuilder:default:={selectorType: ExclusiveClusterSetLabel} + ClusterSelector ManagedClusterSelector `json:"clusterSelector,omitempty"` +} + +// ManagedClusterSelector represents a selector of ManagedClusters +type ManagedClusterSelector struct { + // SelectorType could only be "ExclusiveClusterSetLabel" or "LabelSelector" + // "ExclusiveClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. 
+ // "LabelSelector" means use labelSelector to select target managedClusters + // +kubebuilder:validation:Enum=ExclusiveClusterSetLabel;LabelSelector + // +kubebuilder:default:=ExclusiveClusterSetLabel + // +required + SelectorType SelectorType `json:"selectorType,omitempty"` + + // LabelSelector define the general labelSelector which clusterset will use to select target managedClusters + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` +} + +type SelectorType string + +const ( + // "ExclusiveClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. + ExclusiveClusterSetLabel SelectorType = "ExclusiveClusterSetLabel" + // "LabelSelector" means use labelSelector to select target managedClusters + LabelSelector SelectorType = "LabelSelector" +) + +// ManagedClusterSetStatus represents the current status of the ManagedClusterSet. +type ManagedClusterSetStatus struct { + // Conditions contains the different condition statuses for this ManagedClusterSet. + Conditions []metav1.Condition `json:"conditions"` +} + +const ( + // ManagedClusterSetConditionEmpty means no ManagedCluster is included in the + // ManagedClusterSet. + ManagedClusterSetConditionEmpty string = "ClusterSetEmpty" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedClusterSetList is a collection of ManagedClusterSet. +type ManagedClusterSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of ManagedClusterSet. 
+ Items []ManagedClusterSet `json:"items"` +} diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/types_managedclustersetbinding.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/types_managedclustersetbinding.go new file mode 100644 index 000000000..4871eeb14 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/types_managedclustersetbinding.go @@ -0,0 +1,65 @@ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope="Namespaced",shortName={"mclsetbinding","mclsetbindings"} +// +kubebuilder:storageversion + +// ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. +// User is able to create a ManagedClusterSetBinding in a namespace and bind it to a +// ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of +// managedclustersets/bind. Workloads created in the same namespace can only be +// distributed to ManagedClusters in ManagedClusterSets bound in this namespace by +// higher level controllers. +type ManagedClusterSetBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the attributes of ManagedClusterSetBinding. + Spec ManagedClusterSetBindingSpec `json:"spec"` + + // Status represents the current status of the ManagedClusterSetBinding + // +optional + Status ManagedClusterSetBindingStatus `json:"status,omitempty"` +} + +// ManagedClusterSetBindingSpec defines the attributes of ManagedClusterSetBinding. +type ManagedClusterSetBindingSpec struct { + // ClusterSet is the name of the ManagedClusterSet to bind. It must match the + // instance name of the ManagedClusterSetBinding and cannot change once created. 
+ // User is allowed to set this field if they have an RBAC rule to CREATE on the + // virtual subresource of managedclustersets/bind. + // +kubebuilder:validation:MinLength=1 + ClusterSet string `json:"clusterSet"` +} + +const ( + // ClusterSetBindingBoundType is a condition type of clustersetbinding representing + // whether the ClusterSetBinding is bound to a clusterset. + ClusterSetBindingBoundType = "Bound" +) + +// ManagedClusterSetBindingStatus represents the current status of the ManagedClusterSetBinding. +type ManagedClusterSetBindingStatus struct { + // Conditions contains the different condition statuses for this ManagedClusterSetBinding. + Conditions []metav1.Condition `json:"conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedClusterSetBindingList is a collection of ManagedClusterSetBinding. +type ManagedClusterSetBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of ManagedClusterSetBinding. + Items []ManagedClusterSetBinding `json:"items"` +} diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/zz_generated.deepcopy.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..62bd2dfbe --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,233 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedClusterSelector) DeepCopyInto(out *ManagedClusterSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSelector. +func (in *ManagedClusterSelector) DeepCopy() *ManagedClusterSelector { + if in == nil { + return nil + } + out := new(ManagedClusterSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSet) DeepCopyInto(out *ManagedClusterSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSet. +func (in *ManagedClusterSet) DeepCopy() *ManagedClusterSet { + if in == nil { + return nil + } + out := new(ManagedClusterSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedClusterSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetBinding) DeepCopyInto(out *ManagedClusterSetBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBinding. 
+func (in *ManagedClusterSetBinding) DeepCopy() *ManagedClusterSetBinding { + if in == nil { + return nil + } + out := new(ManagedClusterSetBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedClusterSetBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetBindingList) DeepCopyInto(out *ManagedClusterSetBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedClusterSetBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingList. +func (in *ManagedClusterSetBindingList) DeepCopy() *ManagedClusterSetBindingList { + if in == nil { + return nil + } + out := new(ManagedClusterSetBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedClusterSetBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetBindingSpec) DeepCopyInto(out *ManagedClusterSetBindingSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingSpec. 
+func (in *ManagedClusterSetBindingSpec) DeepCopy() *ManagedClusterSetBindingSpec { + if in == nil { + return nil + } + out := new(ManagedClusterSetBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetBindingStatus) DeepCopyInto(out *ManagedClusterSetBindingStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingStatus. +func (in *ManagedClusterSetBindingStatus) DeepCopy() *ManagedClusterSetBindingStatus { + if in == nil { + return nil + } + out := new(ManagedClusterSetBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetList) DeepCopyInto(out *ManagedClusterSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedClusterSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetList. +func (in *ManagedClusterSetList) DeepCopy() *ManagedClusterSetList { + if in == nil { + return nil + } + out := new(ManagedClusterSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ManagedClusterSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetSpec) DeepCopyInto(out *ManagedClusterSetSpec) { + *out = *in + in.ClusterSelector.DeepCopyInto(&out.ClusterSelector) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetSpec. +func (in *ManagedClusterSetSpec) DeepCopy() *ManagedClusterSetSpec { + if in == nil { + return nil + } + out := new(ManagedClusterSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetStatus) DeepCopyInto(out *ManagedClusterSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetStatus. +func (in *ManagedClusterSetStatus) DeepCopy() *ManagedClusterSetStatus { + if in == nil { + return nil + } + out := new(ManagedClusterSetStatus) + in.DeepCopyInto(out) + return out +} diff --git a/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/zz_generated.swagger_doc_generated.go b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..e5bdb6868 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/api/cluster/v1beta2/zz_generated.swagger_doc_generated.go @@ -0,0 +1,100 @@ +package v1beta2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ManagedClusterSelector = map[string]string{ + "": "ManagedClusterSelector represents a selector of ManagedClusters", + "selectorType": "SelectorType could only be \"ExclusiveClusterSetLabel\" or \"LabelSelector\" \"ExclusiveClusterSetLabel\" means to use label \"cluster.open-cluster-management.io/clusterset:\"\" to select target clusters. \"LabelSelector\" means use labelSelector to select target managedClusters", + "labelSelector": "LabelSelector define the general labelSelector which clusterset will use to select target managedClusters", +} + +func (ManagedClusterSelector) SwaggerDoc() map[string]string { + return map_ManagedClusterSelector +} + +var map_ManagedClusterSet = map[string]string{ + "": "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean:\n 1. The workload can run on any ManagedCluster in the ManagedClusterSet\n 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet\n 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet\n\nIn order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. 
In order to update this label, user must have the permission on both the old and new ManagedClusterSet.", + "spec": "Spec defines the attributes of the ManagedClusterSet", + "status": "Status represents the current status of the ManagedClusterSet", +} + +func (ManagedClusterSet) SwaggerDoc() map[string]string { + return map_ManagedClusterSet +} + +var map_ManagedClusterSetList = map[string]string{ + "": "ManagedClusterSetList is a collection of ManagedClusterSet.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ManagedClusterSet.", +} + +func (ManagedClusterSetList) SwaggerDoc() map[string]string { + return map_ManagedClusterSetList +} + +var map_ManagedClusterSetSpec = map[string]string{ + "": "ManagedClusterSetSpec describes the attributes of the ManagedClusterSet", + "clusterSelector": "ClusterSelector represents a selector of ManagedClusters", +} + +func (ManagedClusterSetSpec) SwaggerDoc() map[string]string { + return map_ManagedClusterSetSpec +} + +var map_ManagedClusterSetStatus = map[string]string{ + "": "ManagedClusterSetStatus represents the current status of the ManagedClusterSet.", + "conditions": "Conditions contains the different condition statuses for this ManagedClusterSet.", +} + +func (ManagedClusterSetStatus) SwaggerDoc() map[string]string { + return map_ManagedClusterSetStatus +} + +var map_ManagedClusterSetBinding = map[string]string{ + "": "ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. 
Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers.", + "spec": "Spec defines the attributes of ManagedClusterSetBinding.", + "status": "Status represents the current status of the ManagedClusterSetBinding", +} + +func (ManagedClusterSetBinding) SwaggerDoc() map[string]string { + return map_ManagedClusterSetBinding +} + +var map_ManagedClusterSetBindingList = map[string]string{ + "": "ManagedClusterSetBindingList is a collection of ManagedClusterSetBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ManagedClusterSetBinding.", +} + +func (ManagedClusterSetBindingList) SwaggerDoc() map[string]string { + return map_ManagedClusterSetBindingList +} + +var map_ManagedClusterSetBindingSpec = map[string]string{ + "": "ManagedClusterSetBindingSpec defines the attributes of ManagedClusterSetBinding.", + "clusterSet": "ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. 
User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind.", +} + +func (ManagedClusterSetBindingSpec) SwaggerDoc() map[string]string { + return map_ManagedClusterSetBindingSpec +} + +var map_ManagedClusterSetBindingStatus = map[string]string{ + "": "ManagedClusterSetBindingStatus represents the current status of the ManagedClusterSetBinding.", + "conditions": "Conditions contains the different condition statuses for this ManagedClusterSetBinding.", +} + +func (ManagedClusterSetBindingStatus) SwaggerDoc() map[string]string { + return map_ManagedClusterSetBindingStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/controlplane/vendor/open-cluster-management.io/api/feature/feature.go b/controlplane/vendor/open-cluster-management.io/api/feature/feature.go index 4d7fc9d9c..af1278d1e 100644 --- a/controlplane/vendor/open-cluster-management.io/api/feature/feature.go +++ b/controlplane/vendor/open-cluster-management.io/api/feature/feature.go @@ -40,6 +40,11 @@ const ( // means that all the approved CSR objects will be signed by the built-in CSR controller in // kube-controller-manager. V1beta1CSRAPICompatibility featuregate.Feature = "V1beta1CSRAPICompatibility" + + // NilExecutorValidating will make the work-webhook to validate the manifest work even if its executor is nil, it + // will check if the request user has the execute-as permission with the default executor + // "system:serviceaccount::klusterlet-work-sa" + NilExecutorValidating featuregate.Feature = "NilExecutorValidating" ) // DefaultSpokeRegistrationFeatureGates consists of all known ocm-registration @@ -58,3 +63,9 @@ var DefaultHubRegistrationFeatureGates = map[featuregate.Feature]featuregate.Fea DefaultClusterSet: {Default: false, PreRelease: featuregate.Alpha}, V1beta1CSRAPICompatibility: {Default: false, PreRelease: featuregate.Alpha}, } + +// DefaultHubWorkFeatureGates consists of all known acm work wehbook feature keys. 
+// To add a new feature, define a key for it above and add it here. +var DefaultHubWorkFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + NilExecutorValidating: {Default: false, PreRelease: featuregate.Alpha}, +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/LICENSE b/controlplane/vendor/open-cluster-management.io/placement/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/framework/interface.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/framework/interface.go new file mode 100644 index 000000000..5e1f7e62f --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/framework/interface.go @@ -0,0 +1,100 @@ +package framework + +import ( + "errors" + "strings" +) + +// Code is the Status code/type which is returned from the framework. +type Code int + +// These are predefined codes used in a Status. +const ( + // Success means that plugin ran correctly and found placement schedulable. + // NOTE: A nil status is also considered as "Success". + Success Code = iota + // Warning means that plugin ran correctly and found placement schedulable, but with some failures to notice. + Warning + // Error is used for internal plugin errors etc. + Error + // Misconfigured is used for internal plugin configuration errors, unexpected input, etc. + Misconfigured +) + +type Status struct { + code Code + // reasons contains the message about status. + reasons []string + // Err contains the error message. + err error + // plugin is an optional field that records the plugin name. + plugin string +} + +// Code returns code of the Status. 
+func (s *Status) Code() Code { + if s == nil { + return Success + } + return s.code +} + +// Message returns a concatenated message on reasons of the Status. +func (s *Status) Message() string { + if s == nil { + return "" + } + return strings.Join(s.reasons, ", ") +} + +// AppendReason appends given reason to the Status. +func (s *Status) AppendReason(reason string) { + s.reasons = append(s.reasons, reason) +} + +// IsSuccess returns true if and only if "Status" is nil or Code is "Success". +func (s *Status) IsSuccess() bool { + return s.Code() == Success +} + +// IsError returns true if and only if Code is "Error" or "Misconfigured". +func (s *Status) IsError() bool { + switch s.Code() { + case Error: + return true + case Misconfigured: + return true + default: + return false + } +} + +// AsError returns nil if the status is a success; otherwise returns an "error" object +// with a concatenated message on reasons of the Status. +func (s *Status) AsError() error { + if s.Code() == Success { + return nil + } + if s.err != nil { + return s.err + } + return errors.New(s.Message()) +} + +// FailedPlugin returns the failed plugin name. +func (s *Status) Plugin() string { + return s.plugin +} + +// NewStatus makes a Status out of the given arguments and returns its pointer. 
+func NewStatus(plugin string, code Code, reasons ...string) *Status { + s := &Status{ + code: code, + reasons: reasons, + plugin: plugin, + } + if code == Error { + s.err = errors.New(s.Message()) + } + return s +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/manager.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/manager.go new file mode 100644 index 000000000..0ca94bb06 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/manager.go @@ -0,0 +1,91 @@ +package hub + +import ( + "context" + "net/http" + "time" + + "github.com/openshift/library-go/pkg/controller/controllercmd" + "k8s.io/apiserver/pkg/server/mux" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/events" + clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterscheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" + clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions" + scheduling "open-cluster-management.io/placement/pkg/controllers/scheduling" + "open-cluster-management.io/placement/pkg/debugger" +) + +// RunControllerManager starts the controllers on hub to make placement decisions. 
+func RunControllerManager(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { + clusterClient, err := clusterclient.NewForConfig(controllerContext.KubeConfig) + if err != nil { + return err + } + + kubeClient, err := kubernetes.NewForConfig(controllerContext.KubeConfig) + if err != nil { + return err + } + + clusterInformers := clusterinformers.NewSharedInformerFactory(clusterClient, 10*time.Minute) + + broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: kubeClient.EventsV1()}) + + broadcaster.StartRecordingToSink(ctx.Done()) + + recorder := broadcaster.NewRecorder(clusterscheme.Scheme, "placementController") + + scheduler := scheduling.NewPluginScheduler( + scheduling.NewSchedulerHandler( + clusterClient, + clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister(), + clusterInformers.Cluster().V1alpha1().AddOnPlacementScores().Lister(), + clusterInformers.Cluster().V1().ManagedClusters().Lister(), + recorder), + ) + + if controllerContext.Server != nil { + debug := debugger.NewDebugger( + scheduler, + clusterInformers.Cluster().V1beta1().Placements(), + clusterInformers.Cluster().V1().ManagedClusters(), + ) + + installDebugger(controllerContext.Server.Handler.NonGoRestfulMux, debug) + } + + schedulingController := scheduling.NewSchedulingController( + clusterClient, + clusterInformers.Cluster().V1().ManagedClusters(), + clusterInformers.Cluster().V1beta1().ManagedClusterSets(), + clusterInformers.Cluster().V1beta1().ManagedClusterSetBindings(), + clusterInformers.Cluster().V1beta1().Placements(), + clusterInformers.Cluster().V1beta1().PlacementDecisions(), + scheduler, + controllerContext.EventRecorder, recorder, + ) + + schedulingControllerResync := scheduling.NewSchedulingControllerResync( + clusterClient, + clusterInformers.Cluster().V1().ManagedClusters(), + clusterInformers.Cluster().V1beta1().ManagedClusterSets(), + clusterInformers.Cluster().V1beta1().ManagedClusterSetBindings(), + 
clusterInformers.Cluster().V1beta1().Placements(), + clusterInformers.Cluster().V1beta1().PlacementDecisions(), + scheduler, + controllerContext.EventRecorder, recorder, + ) + + go clusterInformers.Start(ctx.Done()) + + go schedulingController.Run(ctx, 1) + go schedulingControllerResync.Run(ctx, 1) + + <-ctx.Done() + return nil +} + +func installDebugger(mux *mux.PathRecorderMux, d *debugger.Debugger) { + mux.HandlePrefix(debugger.DebugPath, http.HandlerFunc(d.Handler)) +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/cluster_event_handler.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/cluster_event_handler.go new file mode 100644 index 000000000..e5eee2e86 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/cluster_event_handler.go @@ -0,0 +1,105 @@ +package scheduling + +import ( + "fmt" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + cache "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +type clusterEventHandler struct { + clusterSetLister clusterlisterv1beta1.ManagedClusterSetLister + clusterSetBindingLister clusterlisterv1beta1.ManagedClusterSetBindingLister + placementLister clusterlisterv1beta1.PlacementLister + enqueuePlacementFunc enqueuePlacementFunc +} + +func (h *clusterEventHandler) OnAdd(obj interface{}) { + h.onChange(obj) +} + +func (h *clusterEventHandler) OnUpdate(oldObj, newObj interface{}) { + newCluster, ok := newObj.(*clusterapiv1.ManagedCluster) + if !ok { + return + } + h.onChange(newObj) + + if oldObj == nil { + return + } + oldCluster, ok := oldObj.(*clusterapiv1.ManagedCluster) + if !ok { + return + } + 
+ // if the cluster labels changes, process the original clusterset + if !reflect.DeepEqual(newCluster.Labels, oldCluster.Labels) { + h.onChange(oldCluster) + } +} + +func (h *clusterEventHandler) OnDelete(obj interface{}) { + switch t := obj.(type) { + case *clusterapiv1.ManagedCluster: + h.onChange(obj) + case cache.DeletedFinalStateUnknown: + h.onChange(t.Obj) + default: + utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) + } +} + +func (h *clusterEventHandler) onChange(obj interface{}) { + cluster, ok := obj.(metav1.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) + return + } + + clusterSetNames, err := h.getClusterSetNames(cluster) + if err != nil { + klog.V(4).Infof("Unable to get clusterset of cluster %q: %v", cluster.GetName(), err) + return + } + + // skip cluster belongs to no clusterset + if len(clusterSetNames) == 0 { + return + } + + for _, clusterSetName := range clusterSetNames { + // enqueue placements which might be impacted + err = enqueuePlacementsByClusterSet( + clusterSetName, + h.clusterSetBindingLister, + h.placementLister, + h.enqueuePlacementFunc, + ) + if err != nil { + klog.Errorf("Unable to enqueue placements with access to clusterset %q: %v", clusterSetName, err) + } + } +} + +// getClusterSetName returns the name of the clusterset the cluster belongs to. It also checks the existence +// of the clusterset. 
+func (h *clusterEventHandler) getClusterSetNames(cluster metav1.Object) ([]string, error) { + clusterSetNames := []string{} + clusterSets, err := clusterapiv1beta1.GetClusterSetsOfCluster(cluster.(*clusterapiv1.ManagedCluster), h.clusterSetLister) + if err != nil { + return clusterSetNames, err + } + + for _, cs := range clusterSets { + clusterSetNames = append(clusterSetNames, cs.Name) + } + + return clusterSetNames, nil +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/clusterset_event_handler.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/clusterset_event_handler.go new file mode 100644 index 000000000..798c7fa78 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/clusterset_event_handler.go @@ -0,0 +1,95 @@ +package scheduling + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + cache "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + errorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +type clusterSetEventHandler struct { + clusterSetBindingLister clusterlisterv1beta1.ManagedClusterSetBindingLister + placementLister clusterlisterv1beta1.PlacementLister + enqueuePlacementFunc enqueuePlacementFunc +} + +func (h *clusterSetEventHandler) OnAdd(obj interface{}) { + h.onChange(obj) +} + +func (h *clusterSetEventHandler) OnUpdate(oldObj, newObj interface{}) { + // ignore Update event +} + +func (h *clusterSetEventHandler) OnDelete(obj interface{}) { + var clusterSetName string + switch t := obj.(type) { + case *clusterapiv1beta1.ManagedClusterSet: + clusterSetName = t.Name + case 
cache.DeletedFinalStateUnknown: + clusterSet, ok := t.Obj.(metav1.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type")) + return + } + clusterSetName = clusterSet.GetName() + default: + utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) + return + } + + err := enqueuePlacementsByClusterSet(clusterSetName, h.clusterSetBindingLister, + h.placementLister, h.enqueuePlacementFunc) + if err != nil { + klog.Errorf("Unable to enqueue placements by clusterset %q: %v", clusterSetName, err) + } +} + +func (h *clusterSetEventHandler) onChange(obj interface{}) { + accessor, err := meta.Accessor(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error accessing metadata: %w", err)) + return + } + + clusterSetName := accessor.GetName() + err = enqueuePlacementsByClusterSet(clusterSetName, h.clusterSetBindingLister, + h.placementLister, h.enqueuePlacementFunc) + if err != nil { + klog.Errorf("Unable to enqueue placements by clusterset %q: %v", clusterSetName, err) + } +} + +// enqueuePlacementsByClusterSet enqueues placements that might be impacted by the given clusterset into +// controller queue for further reconciliation +func enqueuePlacementsByClusterSet( + clusterSetName string, + clusterSetBindingLister clusterlisterv1beta1.ManagedClusterSetBindingLister, + placementLister clusterlisterv1beta1.PlacementLister, + enqueuePlacementFunc enqueuePlacementFunc, +) error { + bindings, err := clusterSetBindingLister.List(labels.Everything()) + if err != nil { + return err + } + + errs := []error{} + for _, binding := range bindings { + if binding.Name != clusterSetName { + continue + } + + if err := enqueuePlacementsByClusterSetBinding(binding.Namespace, binding.Name, placementLister, enqueuePlacementFunc); err != nil { + errs = append(errs, err) + } + } + return errorhelpers.NewMultiLineAggregate(errs) +} diff --git 
a/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/clustersetbinding_event_handler.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/clustersetbinding_event_handler.go new file mode 100644 index 000000000..62dc3dc74 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/clustersetbinding_event_handler.go @@ -0,0 +1,90 @@ +package scheduling + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + cache "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +type clusterSetBindingEventHandler struct { + clusterSetLister clusterlisterv1beta1.ManagedClusterSetLister + placementLister clusterlisterv1beta1.PlacementLister + enqueuePlacementFunc enqueuePlacementFunc +} + +func (h *clusterSetBindingEventHandler) OnAdd(obj interface{}) { + h.onChange(obj) +} + +func (h *clusterSetBindingEventHandler) OnUpdate(oldObj, newObj interface{}) { + h.onChange(newObj) +} + +func (h *clusterSetBindingEventHandler) OnDelete(obj interface{}) { + switch t := obj.(type) { + case *clusterapiv1beta1.ManagedClusterSetBinding: + h.onChange(obj) + case cache.DeletedFinalStateUnknown: + h.onChange(t.Obj) + default: + utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) + } +} + +func (h *clusterSetBindingEventHandler) onChange(obj interface{}) { + clusterSetBinding, ok := obj.(metav1.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) + return + } + + namespace := clusterSetBinding.GetNamespace() + clusterSetBindingName := clusterSetBinding.GetName() + + _, err := 
h.clusterSetLister.Get(clusterSetBindingName) + // skip if the clusterset does not exist + if errors.IsNotFound(err) { + return + } + if err != nil { + klog.Errorf("Unable to get clusterset %q: %v", clusterSetBindingName, err) + return + } + + err = enqueuePlacementsByClusterSetBinding(namespace, clusterSetBindingName, h.placementLister, h.enqueuePlacementFunc) + if err != nil { + klog.Errorf("Unable to enqueue placements by clustersetbinding %s/%s: %v", namespace, clusterSetBindingName, err) + } +} + +// enqueuePlacementsByClusterSetBinding enqueues placements that might be impacted by a particular clustersetbinding +// into controller queue for further reconciliation +func enqueuePlacementsByClusterSetBinding( + namespace, clusterSetBindingName string, + placementLister clusterlisterv1beta1.PlacementLister, + enqueuePlacementFunc enqueuePlacementFunc, +) error { + placements, err := placementLister.Placements(namespace).List(labels.Everything()) + if err != nil { + return err + } + for _, placement := range placements { + // ignore placement whose .spec.clusterSets is specified but does no include this + // particular clusterset. + clusterSets := sets.NewString(placement.Spec.ClusterSets...) 
+ if clusterSets.Len() != 0 && !clusterSets.Has(clusterSetBindingName) { + continue + } + enqueuePlacementFunc(placement.Namespace, placement.Name) + } + return nil +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/schedule.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/schedule.go new file mode 100644 index 000000000..668c3c302 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/schedule.go @@ -0,0 +1,430 @@ +package scheduling + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + kevents "k8s.io/client-go/tools/events" + "k8s.io/klog/v2" + clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + clusterlisterv1alpha1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" + "open-cluster-management.io/placement/pkg/plugins" + "open-cluster-management.io/placement/pkg/plugins/addon" + "open-cluster-management.io/placement/pkg/plugins/balance" + "open-cluster-management.io/placement/pkg/plugins/predicate" + "open-cluster-management.io/placement/pkg/plugins/resource" + "open-cluster-management.io/placement/pkg/plugins/steady" + "open-cluster-management.io/placement/pkg/plugins/tainttoleration" +) + +const ( + PrioritizerBalance string = "Balance" + PrioritizerSteady string = "Steady" + PrioritizerResourceAllocatableCPU string = "ResourceAllocatableCPU" + PrioritizerResourceAllocatableMemory string = "ResourceAllocatableMemory" +) + +// PrioritizerScore defines the score for each cluster +type 
PrioritizerScore map[string]int64 + +// Scheduler is an interface for scheduler, it returs the scheduler results +type Scheduler interface { + Schedule( + ctx context.Context, + placement *clusterapiv1beta1.Placement, + clusters []*clusterapiv1.ManagedCluster, + ) (ScheduleResult, *framework.Status) +} + +type ScheduleResult interface { + // FilterResults returns results for each filter + FilterResults() []FilterResult + + // PrioritizerResults returns results for each prioritizer + PrioritizerResults() []PrioritizerResult + + // PrioritizerScores returns total score for each cluster + PrioritizerScores() PrioritizerScore + + // Decisions returns the decisions of the schedule + Decisions() []clusterapiv1beta1.ClusterDecision + + // NumOfUnscheduled returns the number of unscheduled. + NumOfUnscheduled() int + + // RequeueAfter returns the requeue time interval of the placement + RequeueAfter() *time.Duration +} + +type FilterResult struct { + Name string `json:"name"` + FilteredClusters []string `json:"filteredClusters"` +} + +// PrioritizerResult defines the result of one prioritizer, +// include name, weight, and score of each cluster. +type PrioritizerResult struct { + Name string `json:"name"` + Weight int32 `json:"weight"` + Scores PrioritizerScore `json:"scores"` +} + +// ScheduleResult is the result for a certain schedule. 
+type scheduleResult struct { + feasibleClusters []*clusterapiv1.ManagedCluster + scheduledDecisions []clusterapiv1beta1.ClusterDecision + unscheduledDecisions int + + filteredRecords map[string][]*clusterapiv1.ManagedCluster + scoreRecords []PrioritizerResult + scoreSum PrioritizerScore + requeueAfter *time.Duration +} + +type schedulerHandler struct { + recorder kevents.EventRecorder + placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister + scoreLister clusterlisterv1alpha1.AddOnPlacementScoreLister + clusterLister clusterlisterv1.ManagedClusterLister + clusterClient clusterclient.Interface +} + +func NewSchedulerHandler( + clusterClient clusterclient.Interface, placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister, scoreLister clusterlisterv1alpha1.AddOnPlacementScoreLister, clusterLister clusterlisterv1.ManagedClusterLister, recorder kevents.EventRecorder) plugins.Handle { + + return &schedulerHandler{ + recorder: recorder, + placementDecisionLister: placementDecisionLister, + scoreLister: scoreLister, + clusterLister: clusterLister, + clusterClient: clusterClient, + } +} + +func (s *schedulerHandler) EventRecorder() kevents.EventRecorder { + return s.recorder +} + +func (s *schedulerHandler) DecisionLister() clusterlisterv1beta1.PlacementDecisionLister { + return s.placementDecisionLister +} + +func (s *schedulerHandler) ScoreLister() clusterlisterv1alpha1.AddOnPlacementScoreLister { + return s.scoreLister +} + +func (s *schedulerHandler) ClusterLister() clusterlisterv1.ManagedClusterLister { + return s.clusterLister +} + +func (s *schedulerHandler) ClusterClient() clusterclient.Interface { + return s.clusterClient +} + +// Initialize the default prioritizer weight. +// Balane and Steady weight 1, others weight 0. +// The default weight can be replaced by each placement's PrioritizerConfigs. 
+var defaultPrioritizerConfig = map[clusterapiv1beta1.ScoreCoordinate]int32{ + { + Type: clusterapiv1beta1.ScoreCoordinateTypeBuiltIn, + BuiltIn: PrioritizerBalance, + }: 1, + { + Type: clusterapiv1beta1.ScoreCoordinateTypeBuiltIn, + BuiltIn: PrioritizerSteady, + }: 1, +} + +type pluginScheduler struct { + handle plugins.Handle + filters []plugins.Filter + prioritizerWeights map[clusterapiv1beta1.ScoreCoordinate]int32 +} + +func NewPluginScheduler(handle plugins.Handle) *pluginScheduler { + return &pluginScheduler{ + handle: handle, + filters: []plugins.Filter{ + predicate.New(handle), + tainttoleration.New(handle), + }, + prioritizerWeights: defaultPrioritizerConfig, + } +} + +func (s *pluginScheduler) Schedule( + ctx context.Context, + placement *clusterapiv1beta1.Placement, + clusters []*clusterapiv1.ManagedCluster, +) (ScheduleResult, *framework.Status) { + filtered := clusters + finalStatus := framework.NewStatus("", framework.Success, "") + + results := &scheduleResult{ + filteredRecords: map[string][]*clusterapiv1.ManagedCluster{}, + scoreRecords: []PrioritizerResult{}, + } + + // filter clusters + filterPipline := []string{} + + for _, f := range s.filters { + filterResult, status := f.Filter(ctx, placement, filtered) + filtered = filterResult.Filtered + + switch { + case status.IsError(): + return results, status + case status.Code() == framework.Warning: + klog.Warningf("%v", status.Message()) + finalStatus = status + } + + filterPipline = append(filterPipline, f.Name()) + + results.filteredRecords[strings.Join(filterPipline, ",")] = filtered + } + + // Prioritize clusters + // 1. Get weight for each prioritizers. + // For example, weights is {"Steady": 1, "Balance":1, "AddOn/default/ratio":3}. + weights, status := getWeights(s.prioritizerWeights, placement) + switch { + case status.IsError(): + return results, status + case status.Code() == framework.Warning: + klog.Warningf("%v", status.Message()) + finalStatus = status + } + + // 2. 
Generate prioritizers for each placement whose weight != 0. + prioritizers, status := getPrioritizers(weights, s.handle) + switch { + case status.IsError(): + return results, status + case status.Code() == framework.Warning: + klog.Warningf("%v", status.Message()) + finalStatus = status + } + + // 3. Calculate clusters scores. + scoreSum := PrioritizerScore{} + for _, cluster := range filtered { + scoreSum[cluster.Name] = 0 + } + for sc, p := range prioritizers { + // Get cluster score. + scoreResult, status := p.Score(ctx, placement, filtered) + score := scoreResult.Scores + + switch { + case status.IsError(): + return results, status + case status.Code() == framework.Warning: + klog.Warningf("%v", status.Message()) + finalStatus = status + } + + // Record prioritizer score and weight + weight := weights[sc] + results.scoreRecords = append(results.scoreRecords, PrioritizerResult{Name: p.Name(), Weight: weight, Scores: score}) + + // The final score is a sum of each prioritizer score * weight. + // A higher weight indicates that the prioritizer weights more in the cluster selection, + // while 0 weight indicate thats the prioritizer is disabled. + for name, val := range score { + scoreSum[name] = scoreSum[name] + val*int64(weight) + } + + } + + // 4. 
Sort clusters by score, if score is equal, sort by name + sort.SliceStable(filtered, func(i, j int) bool { + if scoreSum[filtered[i].Name] == scoreSum[filtered[j].Name] { + return filtered[i].Name < filtered[j].Name + } else { + return scoreSum[filtered[i].Name] > scoreSum[filtered[j].Name] + } + }) + + results.feasibleClusters = filtered + results.scoreSum = scoreSum + + // select clusters and generate cluster decisions + decisions := selectClusters(placement, filtered) + scheduled, unscheduled := len(decisions), 0 + if placement.Spec.NumberOfClusters != nil { + unscheduled = int(*placement.Spec.NumberOfClusters) - scheduled + } + results.scheduledDecisions = decisions + results.unscheduledDecisions = unscheduled + + // set placement requeue time + for _, f := range s.filters { + if r, _ := f.RequeueAfter(ctx, placement); r.RequeueTime != nil { + newRequeueAfter := time.Until(*r.RequeueTime) + results.requeueAfter = setRequeueAfter(results.requeueAfter, &newRequeueAfter) + } + } + for _, p := range prioritizers { + if r, _ := p.RequeueAfter(ctx, placement); r.RequeueTime != nil { + newRequeueAfter := time.Until(*r.RequeueTime) + results.requeueAfter = setRequeueAfter(results.requeueAfter, &newRequeueAfter) + } + } + + return results, finalStatus +} + +// makeClusterDecisions selects clusters based on given cluster slice and then creates +// cluster decisions. 
+func selectClusters(placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) []clusterapiv1beta1.ClusterDecision { + numOfDecisions := len(clusters) + if placement.Spec.NumberOfClusters != nil { + numOfDecisions = int(*placement.Spec.NumberOfClusters) + } + + // truncate the cluster slice if the desired number of decisions is less than + // the number of the candidate clusters + if numOfDecisions < len(clusters) { + clusters = clusters[:numOfDecisions] + } + + decisions := []clusterapiv1beta1.ClusterDecision{} + for _, cluster := range clusters { + decisions = append(decisions, clusterapiv1beta1.ClusterDecision{ + ClusterName: cluster.Name, + }) + } + return decisions +} + +// setRequeueAfter selects minimal time.Duration as requeue time +func setRequeueAfter(requeueAfter, newRequeueAfter *time.Duration) *time.Duration { + if newRequeueAfter == nil { + return requeueAfter + } + + if requeueAfter == nil || *newRequeueAfter < *requeueAfter { + return newRequeueAfter + } + + return requeueAfter +} + +// Get prioritizer weight for the placement. +// In Additive and "" mode, will override defaultWeight with what placement has defined and return. +// In Exact mode, will return the name and weight defined in placement. 
+func getWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, placement *clusterapiv1beta1.Placement) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) { + mode := placement.Spec.PrioritizerPolicy.Mode + switch { + case mode == clusterapiv1beta1.PrioritizerPolicyModeExact: + return mergeWeights(nil, placement.Spec.PrioritizerPolicy.Configurations) + case mode == clusterapiv1beta1.PrioritizerPolicyModeAdditive || mode == "": + return mergeWeights(defaultWeight, placement.Spec.PrioritizerPolicy.Configurations) + default: + msg := fmt.Sprintf("incorrect prioritizer policy mode: %s", mode) + return nil, framework.NewStatus("", framework.Misconfigured, msg) + } +} + +func mergeWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, customizedWeight []clusterapiv1beta1.PrioritizerConfig) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) { + weights := make(map[clusterapiv1beta1.ScoreCoordinate]int32) + status := framework.NewStatus("", framework.Success, "") + // copy the default weight + for sc, w := range defaultWeight { + weights[sc] = w + } + + // override default weight + for _, c := range customizedWeight { + if c.ScoreCoordinate != nil { + weights[*c.ScoreCoordinate] = c.Weight + } else { + return nil, framework.NewStatus("", framework.Misconfigured, "scoreCoordinate field is required") + } + } + return weights, status +} + +// Generate prioritizers for the placement. 
+func getPrioritizers(weights map[clusterapiv1beta1.ScoreCoordinate]int32, handle plugins.Handle) (map[clusterapiv1beta1.ScoreCoordinate]plugins.Prioritizer, *framework.Status) { + result := make(map[clusterapiv1beta1.ScoreCoordinate]plugins.Prioritizer) + status := framework.NewStatus("", framework.Success, "") + for k, v := range weights { + if v == 0 { + continue + } + if k.Type == clusterapiv1beta1.ScoreCoordinateTypeBuiltIn { + switch { + case k.BuiltIn == PrioritizerBalance: + result[k] = balance.New(handle) + case k.BuiltIn == PrioritizerSteady: + result[k] = steady.New(handle) + case k.BuiltIn == PrioritizerResourceAllocatableCPU || k.BuiltIn == PrioritizerResourceAllocatableMemory: + result[k] = resource.NewResourcePrioritizerBuilder(handle).WithPrioritizerName(k.BuiltIn).Build() + default: + msg := fmt.Sprintf("incorrect builtin prioritizer: %s", k.BuiltIn) + return nil, framework.NewStatus("", framework.Misconfigured, msg) + } + } else { + if k.AddOn == nil { + return nil, framework.NewStatus("", framework.Misconfigured, "addOn should not be empty") + } + result[k] = addon.NewAddOnPrioritizerBuilder(handle).WithResourceName(k.AddOn.ResourceName).WithScoreName(k.AddOn.ScoreName).Build() + } + } + return result, status +} + +func (r *scheduleResult) FilterResults() []FilterResult { + results := []FilterResult{} + + // order the FilterResults by key length + filteredRecordsKey := []string{} + for name := range r.filteredRecords { + filteredRecordsKey = append(filteredRecordsKey, name) + } + sort.SliceStable(filteredRecordsKey, func(i, j int) bool { + return len(filteredRecordsKey[i]) < len(filteredRecordsKey[j]) + }) + + // go through the FilterResults by key length + for _, name := range filteredRecordsKey { + result := FilterResult{Name: name, FilteredClusters: []string{}} + + for _, c := range r.filteredRecords[name] { + result.FilteredClusters = append(result.FilteredClusters, c.Name) + } + results = append(results, result) + } + return results +} + 
+func (r *scheduleResult) PrioritizerResults() []PrioritizerResult { + return r.scoreRecords +} + +func (r *scheduleResult) PrioritizerScores() PrioritizerScore { + return r.scoreSum +} + +func (r *scheduleResult) Decisions() []clusterapiv1beta1.ClusterDecision { + return r.scheduledDecisions +} + +func (r *scheduleResult) NumOfUnscheduled() int { + return r.unscheduledDecisions +} + +func (r *scheduleResult) RequeueAfter() *time.Duration { + return r.requeueAfter +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/scheduling_controller.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/scheduling_controller.go new file mode 100644 index 000000000..32e963642 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/controllers/scheduling/scheduling_controller.go @@ -0,0 +1,667 @@ +package scheduling + +import ( + "context" + "fmt" + "reflect" + "sort" + "strings" + "time" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/events" + errorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + cache "k8s.io/client-go/tools/cache" + kevents "k8s.io/client-go/tools/events" + "k8s.io/klog/v2" + clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterinformerv1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" + clusterinformerv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1" + 
clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" +) + +const ( + clusterSetLabel = "cluster.open-cluster-management.io/clusterset" + placementLabel = "cluster.open-cluster-management.io/placement" + schedulingControllerName = "SchedulingController" + schedulingControllerResyncName = "SchedulingControllerResync" + maxNumOfClusterDecisions = 100 + maxEventMessageLength = 1000 //the event message can have at most 1024 characters, use 1000 as limitation here to keep some buffer +) + +var ResyncInterval = time.Minute * 5 + +type enqueuePlacementFunc func(namespace, name string) + +// schedulingController schedules cluster decisions for Placements +type schedulingController struct { + clusterClient clusterclient.Interface + clusterLister clusterlisterv1.ManagedClusterLister + clusterSetLister clusterlisterv1beta1.ManagedClusterSetLister + clusterSetBindingLister clusterlisterv1beta1.ManagedClusterSetBindingLister + placementLister clusterlisterv1beta1.PlacementLister + placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister + enqueuePlacementFunc enqueuePlacementFunc + scheduler Scheduler + recorder kevents.EventRecorder +} + +// NewDecisionSchedulingController return an instance of schedulingController +func NewSchedulingController( + clusterClient clusterclient.Interface, + clusterInformer clusterinformerv1.ManagedClusterInformer, + clusterSetInformer clusterinformerv1beta1.ManagedClusterSetInformer, + clusterSetBindingInformer clusterinformerv1beta1.ManagedClusterSetBindingInformer, + placementInformer clusterinformerv1beta1.PlacementInformer, + placementDecisionInformer clusterinformerv1beta1.PlacementDecisionInformer, + scheduler 
Scheduler, + recorder events.Recorder, krecorder kevents.EventRecorder, +) factory.Controller { + syncCtx := factory.NewSyncContext(schedulingControllerName, recorder) + enqueuePlacementFunc := func(namespace, name string) { + syncCtx.Queue().Add(fmt.Sprintf("%s/%s", namespace, name)) + } + + // build controller + c := &schedulingController{ + clusterClient: clusterClient, + clusterLister: clusterInformer.Lister(), + clusterSetLister: clusterSetInformer.Lister(), + clusterSetBindingLister: clusterSetBindingInformer.Lister(), + placementLister: placementInformer.Lister(), + placementDecisionLister: placementDecisionInformer.Lister(), + enqueuePlacementFunc: enqueuePlacementFunc, + recorder: krecorder, + scheduler: scheduler, + } + + // setup event handler for cluster informer. + // Once a cluster changes, clusterEventHandler enqueues all placements which are + // impacted potentially for further reconciliation. It might not function before the + // informers/listers of clusterset/clustersetbinding/placement are synced during + // controller booting. But that should not cause any problem because all existing + // placements will be enqueued by the controller anyway when booting. + clusterInformer.Informer().AddEventHandler(&clusterEventHandler{ + clusterSetLister: clusterSetInformer.Lister(), + clusterSetBindingLister: clusterSetBindingInformer.Lister(), + placementLister: placementInformer.Lister(), + enqueuePlacementFunc: enqueuePlacementFunc, + }) + + // setup event handler for clusterset informer + // Once a clusterset changes, clusterSetEventHandler enqueues all placements which are + // impacted potentially for further reconciliation. It might not function before the + // informers/listers of clustersetbinding/placement are synced during controller + // booting. But that should not cause any problem because all existing placements will + // be enqueued by the controller anyway when booting. 
+ clusterSetInformer.Informer().AddEventHandler(&clusterSetEventHandler{ + clusterSetBindingLister: clusterSetBindingInformer.Lister(), + placementLister: placementInformer.Lister(), + enqueuePlacementFunc: enqueuePlacementFunc, + }) + + // setup event handler for clustersetbinding informer + // Once a clustersetbinding changes, clusterSetBindingEventHandler enqueues all placements + // which are impacted potentially for further reconciliation. It might not function before + // the informers/listers of clusterset/placement are synced during controller booting. But + // that should not cause any problem because all existing placements will be enqueued by + // the controller anyway when booting. + clusterSetBindingInformer.Informer().AddEventHandler(&clusterSetBindingEventHandler{ + clusterSetLister: clusterSetInformer.Lister(), + placementLister: placementInformer.Lister(), + enqueuePlacementFunc: enqueuePlacementFunc, + }) + + return factory.New(). + WithSyncContext(syncCtx). + WithInformersQueueKeyFunc(func(obj runtime.Object) string { + key, _ := cache.MetaNamespaceKeyFunc(obj) + return key + }, placementInformer.Informer()). + WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string { + accessor, _ := meta.Accessor(obj) + labels := accessor.GetLabels() + placementName := labels[placementLabel] + return fmt.Sprintf("%s/%s", accessor.GetNamespace(), placementName) + }, func(obj interface{}) bool { + accessor, err := meta.Accessor(obj) + if err != nil { + return false + } + labels := accessor.GetLabels() + if _, ok := labels[placementLabel]; ok { + return true + } + return false + }, placementDecisionInformer.Informer()). + WithBareInformers(clusterInformer.Informer(), clusterSetInformer.Informer(), clusterSetBindingInformer.Informer()). + WithSync(c.sync). 
+ ToController(schedulingControllerName, recorder) +} + +func NewSchedulingControllerResync( + clusterClient clusterclient.Interface, + clusterInformer clusterinformerv1.ManagedClusterInformer, + clusterSetInformer clusterinformerv1beta1.ManagedClusterSetInformer, + clusterSetBindingInformer clusterinformerv1beta1.ManagedClusterSetBindingInformer, + placementInformer clusterinformerv1beta1.PlacementInformer, + placementDecisionInformer clusterinformerv1beta1.PlacementDecisionInformer, + scheduler Scheduler, + recorder events.Recorder, krecorder kevents.EventRecorder, +) factory.Controller { + syncCtx := factory.NewSyncContext(schedulingControllerResyncName, recorder) + enqueuePlacementFunc := func(namespace, name string) { + syncCtx.Queue().Add(fmt.Sprintf("%s/%s", namespace, name)) + } + + // build controller + c := &schedulingController{ + clusterClient: clusterClient, + clusterLister: clusterInformer.Lister(), + clusterSetLister: clusterSetInformer.Lister(), + clusterSetBindingLister: clusterSetBindingInformer.Lister(), + placementLister: placementInformer.Lister(), + placementDecisionLister: placementDecisionInformer.Lister(), + enqueuePlacementFunc: enqueuePlacementFunc, + recorder: krecorder, + scheduler: scheduler, + } + + return factory.New(). + WithSyncContext(syncCtx). + WithSync(c.resync). + ResyncEvery(ResyncInterval). 
+ ToController(schedulingControllerResyncName, recorder) + +} + +// Resync the placement which depends on AddOnPlacementScore periodically +func (c *schedulingController) resync(ctx context.Context, syncCtx factory.SyncContext) error { + queueKey := syncCtx.QueueKey() + klog.V(4).Infof("Resync placement %q", queueKey) + + if queueKey == "key" { + placements, err := c.placementLister.List(labels.Everything()) + if err != nil { + return err + } + + for _, placement := range placements { + for _, config := range placement.Spec.PrioritizerPolicy.Configurations { + if config.ScoreCoordinate != nil && config.ScoreCoordinate.Type == clusterapiv1beta1.ScoreCoordinateTypeAddOn { + key, _ := cache.MetaNamespaceKeyFunc(placement) + klog.V(4).Infof("Requeue placement %s", key) + syncCtx.Queue().Add(key) + break + } + } + } + + return nil + } else { + placement, err := c.getPlacement(queueKey) + if errors.IsNotFound(err) { + // no work if placement is deleted + return nil + } + if err != nil { + return err + } + // Do not pass syncCtx to syncPlacement, since don't want to requeue the placement when resyncing the placement. 
+ return c.syncPlacement(ctx, nil, placement) + } +} + +func (c *schedulingController) sync(ctx context.Context, syncCtx factory.SyncContext) error { + queueKey := syncCtx.QueueKey() + klog.V(4).Infof("Reconciling placement %q", queueKey) + + placement, err := c.getPlacement(queueKey) + if errors.IsNotFound(err) { + // no work if placement is deleted + return nil + } + if err != nil { + return err + } + + return c.syncPlacement(ctx, syncCtx, placement) +} + +func (c *schedulingController) getPlacement(queueKey string) (*clusterapiv1beta1.Placement, error) { + namespace, name, err := cache.SplitMetaNamespaceKey(queueKey) + if err != nil { + // ignore placement whose key is not in format: namespace/name + utilruntime.HandleError(err) + return nil, nil + } + + placement, err := c.placementLister.Placements(namespace).Get(name) + if err != nil { + return nil, err + } + + return placement, nil +} + +func (c *schedulingController) syncPlacement(ctx context.Context, syncCtx factory.SyncContext, placement *clusterapiv1beta1.Placement) error { + // no work if placement is deleting + if !placement.DeletionTimestamp.IsZero() { + return nil + } + + // no work if placement has cluster.open-cluster-management.io/experimental-scheduling-disable: "true" annotation + if value, ok := placement.GetAnnotations()[clusterapiv1beta1.PlacementDisableAnnotation]; ok && value == "true" { + return nil + } + + // get all valid clustersetbindings in the placement namespace + bindings, err := c.getValidManagedClusterSetBindings(placement.Namespace) + if err != nil { + return err + } + + // get eligible clustersets for the placement + clusterSetNames := c.getEligibleClusterSets(placement, bindings) + + // get available clusters for the placement + clusters, err := c.getAvailableClusters(clusterSetNames) + if err != nil { + return err + } + + // schedule placement with scheduler + scheduleResult, status := c.scheduler.Schedule(ctx, placement, clusters) + misconfiguredCondition := 
newMisconfiguredCondition(status) + satisfiedCondition := newSatisfiedCondition( + placement.Spec.ClusterSets, + clusterSetNames, + len(bindings), + len(clusters), + len(scheduleResult.Decisions()), + scheduleResult.NumOfUnscheduled(), + status, + ) + + // update status and return error if schedule() returns error + if status.IsError() { + if err := c.updateStatus(ctx, placement, int32(len(scheduleResult.Decisions())), misconfiguredCondition, satisfiedCondition); err != nil { + return err + } + return status.AsError() + } + + // requeue placement if requeueAfter is defined in scheduleResult + if syncCtx != nil && scheduleResult.RequeueAfter() != nil { + key, _ := cache.MetaNamespaceKeyFunc(placement) + t := scheduleResult.RequeueAfter() + klog.V(4).Infof("Requeue placement %s after %t", key, t) + syncCtx.Queue().AddAfter(key, *t) + } + + err = c.bind(ctx, placement, scheduleResult.Decisions(), scheduleResult.PrioritizerScores(), status) + if err != nil { + return err + } + + // update placement status if necessary to signal no bindings + return c.updateStatus(ctx, placement, int32(len(scheduleResult.Decisions())), misconfiguredCondition, satisfiedCondition) +} + +// getManagedClusterSetBindings returns all bindings found in the placement namespace. 
+func (c *schedulingController) getValidManagedClusterSetBindings(placementNamespace string) ([]*clusterapiv1beta1.ManagedClusterSetBinding, error) { + // get all clusterset bindings under the placement namespace + bindings, err := c.clusterSetBindingLister.ManagedClusterSetBindings(placementNamespace).List(labels.Everything()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + bindings = nil + } + + validBindings := []*clusterapiv1beta1.ManagedClusterSetBinding{} + for _, binding := range bindings { + // ignore clustersetbinding refers to a non-existent clusterset + _, err := c.clusterSetLister.Get(binding.Name) + if errors.IsNotFound(err) { + continue + } + if err != nil { + return nil, err + } + validBindings = append(validBindings, binding) + } + + return validBindings, nil +} + +// getEligibleClusterSets returns the names of clusterset that eligible for the placement +func (c *schedulingController) getEligibleClusterSets(placement *clusterapiv1beta1.Placement, bindings []*clusterapiv1beta1.ManagedClusterSetBinding) []string { + // filter out invaid clustersetbindings + clusterSetNames := sets.NewString() + for _, binding := range bindings { + clusterSetNames.Insert(binding.Name) + } + + // get intersection of clustesets bound to placement namespace and clustesets specified + // in placement spec + if len(placement.Spec.ClusterSets) != 0 { + clusterSetNames = clusterSetNames.Intersection(sets.NewString(placement.Spec.ClusterSets...)) + } + + return clusterSetNames.List() +} + +// getAvailableClusters returns available clusters for the given placement. 
The clusters must +// 1) Be from clustersets bound to the placement namespace; +// 2) Belong to one of particular clustersets if .spec.clusterSets is specified; +func (c *schedulingController) getAvailableClusters(clusterSetNames []string) ([]*clusterapiv1.ManagedCluster, error) { + if len(clusterSetNames) == 0 { + return nil, nil + } + //all available clusters + availableClusters := map[string]*clusterapiv1.ManagedCluster{} + + for _, name := range clusterSetNames { + // ignore clusterset if failed to get + clusterSet, err := c.clusterSetLister.Get(name) + if errors.IsNotFound(err) { + continue + } + if err != nil { + return nil, err + } + clusters, err := clusterapiv1beta1.GetClustersFromClusterSet(clusterSet, c.clusterLister) + if err != nil { + return nil, fmt.Errorf("failed to get clusterset: %v, clusters, Error: %v", clusterSet.Name, err) + } + for i := range clusters { + availableClusters[clusters[i].Name] = clusters[i] + } + } + + if len(availableClusters) == 0 { + return nil, nil + } + + result := []*clusterapiv1.ManagedCluster{} + for _, c := range availableClusters { + result = append(result, c) + } + + return result, nil +} + +// updateStatus updates the status of the placement according to intermediate scheduling data. +func (c *schedulingController) updateStatus( + ctx context.Context, + placement *clusterapiv1beta1.Placement, + numberOfSelectedClusters int32, + conditions ...metav1.Condition, +) error { + newPlacement := placement.DeepCopy() + newPlacement.Status.NumberOfSelectedClusters = numberOfSelectedClusters + + for _, c := range conditions { + meta.SetStatusCondition(&newPlacement.Status.Conditions, c) + } + if reflect.DeepEqual(newPlacement.Status, placement.Status) { + return nil + } + + _, err := c.clusterClient.ClusterV1beta1(). + Placements(newPlacement.Namespace). 
+ UpdateStatus(ctx, newPlacement, metav1.UpdateOptions{}) + return err +} + +// newSatisfiedCondition returns a new condition with type PlacementConditionSatisfied +func newSatisfiedCondition( + clusterSetsInSpec []string, + eligibleClusterSets []string, + numOfBindings, + numOfAvailableClusters, + numOfFeasibleClusters, + numOfUnscheduledDecisions int, + status *framework.Status, +) metav1.Condition { + condition := metav1.Condition{ + Type: clusterapiv1beta1.PlacementConditionSatisfied, + } + switch { + case numOfBindings == 0: + condition.Status = metav1.ConditionFalse + condition.Reason = "NoManagedClusterSetBindings" + condition.Message = "No valid ManagedClusterSetBindings found in placement namespace" + case len(eligibleClusterSets) == 0: + condition.Status = metav1.ConditionFalse + condition.Reason = "NoIntersection" + condition.Message = fmt.Sprintf("None of ManagedClusterSets [%s] is bound to placement namespace", strings.Join(clusterSetsInSpec, ",")) + case numOfAvailableClusters == 0: + condition.Status = metav1.ConditionFalse + condition.Reason = "AllManagedClusterSetsEmpty" + condition.Message = fmt.Sprintf("All ManagedClusterSets [%s] have no member ManagedCluster", strings.Join(eligibleClusterSets, ",")) + case status.Code() == framework.Error: + condition.Status = metav1.ConditionFalse + condition.Reason = "NotAllDecisionsScheduled" + condition.Message = status.AsError().Error() + case numOfFeasibleClusters == 0: + condition.Status = metav1.ConditionFalse + condition.Reason = "NoManagedClusterMatched" + condition.Message = "No ManagedCluster matches any of the cluster predicate" + case numOfUnscheduledDecisions == 0: + condition.Status = metav1.ConditionTrue + condition.Reason = "AllDecisionsScheduled" + condition.Message = "All cluster decisions scheduled" + default: + condition.Status = metav1.ConditionFalse + condition.Reason = "NotAllDecisionsScheduled" + condition.Message = fmt.Sprintf("%d cluster decisions unscheduled", 
numOfUnscheduledDecisions) + } + return condition +} + +func newMisconfiguredCondition(status *framework.Status) metav1.Condition { + if status.Code() == framework.Misconfigured { + return metav1.Condition{ + Type: clusterapiv1beta1.PlacementConditionMisconfigured, + Status: metav1.ConditionTrue, + Reason: "Misconfigured", + Message: fmt.Sprintf("%s:%s", status.Plugin(), status.Message()), + } + } else { + return metav1.Condition{ + Type: clusterapiv1beta1.PlacementConditionMisconfigured, + Status: metav1.ConditionFalse, + Reason: "Succeedconfigured", + Message: "Placement configurations check pass", + } + } +} + +// bind updates the cluster decisions in the status of the placementdecisions with the given +// cluster decision slice. New placementdecisions will be created if no one exists. +func (c *schedulingController) bind( + ctx context.Context, + placement *clusterapiv1beta1.Placement, + clusterDecisions []clusterapiv1beta1.ClusterDecision, + clusterScores PrioritizerScore, + status *framework.Status, +) error { + // sort clusterdecisions by cluster name + sort.SliceStable(clusterDecisions, func(i, j int) bool { + return clusterDecisions[i].ClusterName < clusterDecisions[j].ClusterName + }) + + // split the cluster decisions into slices, the size of each slice cannot exceed + // maxNumOfClusterDecisions. + decisionSlices := [][]clusterapiv1beta1.ClusterDecision{} + remainingDecisions := clusterDecisions + for index := 0; len(remainingDecisions) > 0; index++ { + var decisionSlice []clusterapiv1beta1.ClusterDecision + switch { + case len(remainingDecisions) > maxNumOfClusterDecisions: + decisionSlice = remainingDecisions[0:maxNumOfClusterDecisions] + remainingDecisions = remainingDecisions[maxNumOfClusterDecisions:] + default: + decisionSlice = remainingDecisions + remainingDecisions = nil + } + decisionSlices = append(decisionSlices, decisionSlice) + } + + // bind cluster decision slices to placementdecisions. 
+ errs := []error{} + + placementDecisionNames := sets.NewString() + for index, decisionSlice := range decisionSlices { + placementDecisionName := fmt.Sprintf("%s-decision-%d", placement.Name, index+1) + placementDecisionNames.Insert(placementDecisionName) + err := c.createOrUpdatePlacementDecision( + ctx, placement, placementDecisionName, decisionSlice, clusterScores, status) + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errorhelpers.NewMultiLineAggregate(errs) + } + + // query all placementdecisions of the placement + requirement, err := labels.NewRequirement(placementLabel, selection.Equals, []string{placement.Name}) + if err != nil { + return err + } + labelSelector := labels.NewSelector().Add(*requirement) + placementDecisions, err := c.placementDecisionLister.PlacementDecisions(placement.Namespace).List(labelSelector) + if err != nil { + return err + } + + // delete redundant placementdecisions + errs = []error{} + for _, placementDecision := range placementDecisions { + if placementDecisionNames.Has(placementDecision.Name) { + continue + } + err := c.clusterClient.ClusterV1beta1().PlacementDecisions( + placementDecision.Namespace).Delete(ctx, placementDecision.Name, metav1.DeleteOptions{}) + if errors.IsNotFound(err) { + continue + } + if err != nil { + errs = append(errs, err) + } + c.recorder.Eventf( + placement, placementDecision, corev1.EventTypeNormal, + "DecisionDelete", "DecisionDeleted", + "Decision %s is deleted with placement %s in namespace %s", placementDecision.Name, placement.Name, placement.Namespace) + } + return errorhelpers.NewMultiLineAggregate(errs) +} + +// createOrUpdatePlacementDecision creates a new PlacementDecision if it does not exist and +// then updates the status with the given ClusterDecision slice if necessary +func (c *schedulingController) createOrUpdatePlacementDecision( + ctx context.Context, + placement *clusterapiv1beta1.Placement, + placementDecisionName string, + clusterDecisions 
[]clusterapiv1beta1.ClusterDecision, + clusterScores PrioritizerScore, + status *framework.Status, +) error { + if len(clusterDecisions) > maxNumOfClusterDecisions { + return fmt.Errorf("the number of clusterdecisions %q exceeds the max limitation %q", len(clusterDecisions), maxNumOfClusterDecisions) + } + + placementDecision, err := c.placementDecisionLister.PlacementDecisions(placement.Namespace).Get(placementDecisionName) + switch { + case errors.IsNotFound(err): + // create the placementdecision if not exists + owner := metav1.NewControllerRef(placement, clusterapiv1beta1.GroupVersion.WithKind("Placement")) + placementDecision = &clusterapiv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: placementDecisionName, + Namespace: placement.Namespace, + Labels: map[string]string{ + placementLabel: placement.Name, + }, + OwnerReferences: []metav1.OwnerReference{*owner}, + }, + } + var err error + placementDecision, err = c.clusterClient.ClusterV1beta1().PlacementDecisions( + placement.Namespace).Create(ctx, placementDecision, metav1.CreateOptions{}) + if err != nil { + return err + } + c.recorder.Eventf( + placement, placementDecision, corev1.EventTypeNormal, + "DecisionCreate", "DecisionCreated", + "Decision %s is created with placement %s in namespace %s", placementDecision.Name, placement.Name, placement.Namespace) + case err != nil: + return err + } + + // update the status of the placementdecision if decisions change + if apiequality.Semantic.DeepEqual(placementDecision.Status.Decisions, clusterDecisions) { + return nil + } + + newPlacementDecision := placementDecision.DeepCopy() + newPlacementDecision.Status.Decisions = clusterDecisions + newPlacementDecision, err = c.clusterClient.ClusterV1beta1().PlacementDecisions(newPlacementDecision.Namespace). 
+ UpdateStatus(ctx, newPlacementDecision, metav1.UpdateOptions{}) + + if err != nil { + return err + } + + // update the event with warning + if status.Code() == framework.Warning { + c.recorder.Eventf( + placement, placementDecision, corev1.EventTypeWarning, + "DecisionUpdate", "DecisionUpdated", + "Decision %s is updated with placement %s in namespace %s: %s in plugin %s", placementDecision.Name, placement.Name, placement.Namespace, + status.Message(), + status.Plugin()) + } else { + c.recorder.Eventf( + placement, placementDecision, corev1.EventTypeNormal, + "DecisionUpdate", "DecisionUpdated", + "Decision %s is updated with placement %s in namespace %s", placementDecision.Name, placement.Name, placement.Namespace) + } + + // update the event with prioritizer score. + scoreStr := "" + for k, v := range clusterScores { + tmpScore := fmt.Sprintf("%s:%d ", k, v) + if len(scoreStr)+len(tmpScore) > maxEventMessageLength { + scoreStr += "......" + break + } else { + scoreStr += tmpScore + } + } + + c.recorder.Eventf( + placement, placementDecision, corev1.EventTypeNormal, + "ScoreUpdate", "ScoreUpdated", + scoreStr) + + return nil +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/debugger/debugger.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/debugger/debugger.go new file mode 100644 index 000000000..c50237b83 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/debugger/debugger.go @@ -0,0 +1,83 @@ +package debugger + +import ( + "encoding/json" + "net/http" + "strings" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + clusterinformerv1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" + clusterinformerv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1" + clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + clusterlisterv1beta1 
"open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + scheduling "open-cluster-management.io/placement/pkg/controllers/scheduling" +) + +const DebugPath = "/debug/placements/" + +// Debugger provides a debug http endpoint for scheduler +type Debugger struct { + scheduler scheduling.Scheduler + clusterLister clusterlisterv1.ManagedClusterLister + placementLister clusterlisterv1beta1.PlacementLister +} + +// DebugResult is the result returned by debugger +type DebugResult struct { + FilterResults []scheduling.FilterResult `json:"filteredPiplieResults,omitempty"` + PrioritizeResults []scheduling.PrioritizerResult `json:"prioritizeResults,omitempty"` + Error string `json:"error,omitempty"` +} + +func NewDebugger( + scheduler scheduling.Scheduler, + placementInformer clusterinformerv1beta1.PlacementInformer, + clusterInformer clusterinformerv1.ManagedClusterInformer) *Debugger { + return &Debugger{ + scheduler: scheduler, + clusterLister: clusterInformer.Lister(), + placementLister: placementInformer.Lister(), + } +} + +func (d *Debugger) Handler(w http.ResponseWriter, r *http.Request) { + namespace, name, err := d.parsePath(r.URL.Path) + if err != nil { + d.reportErr(w, err) + return + } + + placement, err := d.placementLister.Placements(namespace).Get(name) + if err != nil { + d.reportErr(w, err) + return + } + + clusters, err := d.clusterLister.List(labels.Everything()) + if err != nil { + d.reportErr(w, err) + return + } + + scheduleResults, _ := d.scheduler.Schedule(r.Context(), placement, clusters) + + result := DebugResult{FilterResults: scheduleResults.FilterResults(), PrioritizeResults: scheduleResults.PrioritizerResults()} + + resultByte, _ := json.Marshal(result) + + w.Write(resultByte) +} + +func (d *Debugger) parsePath(path string) (string, string, error) { + metaNamespaceKey := strings.TrimPrefix(path, DebugPath) + return cache.SplitMetaNamespaceKey(metaNamespaceKey) +} + +func (d *Debugger) reportErr(w http.ResponseWriter, err error) { 
+ result := &DebugResult{Error: err.Error()} + + resultByte, _ := json.Marshal(result) + + w.Write(resultByte) +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/addon/addon.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/addon/addon.go new file mode 100644 index 000000000..973b0d8e6 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/addon/addon.go @@ -0,0 +1,115 @@ +package addon + +import ( + "context" + "fmt" + + "k8s.io/klog/v2" + "k8s.io/utils/clock" + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" + "open-cluster-management.io/placement/pkg/plugins" +) + +const ( + placementLabel = clusterapiv1beta1.PlacementLabel + description = ` + Customize prioritizer get cluster scores from AddOnPlacementScores with sepcific + resource name and score name. The clusters which doesn't have corresponding + AddOnPlacementScores resource or has expired score is given score 0. 
+ ` +) + +var _ plugins.Prioritizer = &AddOn{} +var AddOnClock = (clock.Clock)(clock.RealClock{}) + +type AddOn struct { + handle plugins.Handle + prioritizerName string + resourceName string + scoreName string +} + +type AddOnBuilder struct { + addOn *AddOn +} + +func NewAddOnPrioritizerBuilder(handle plugins.Handle) *AddOnBuilder { + return &AddOnBuilder{ + addOn: &AddOn{ + handle: handle, + }, + } +} + +func (c *AddOnBuilder) WithResourceName(name string) *AddOnBuilder { + c.addOn.resourceName = name + return c +} + +func (c *AddOnBuilder) WithScoreName(name string) *AddOnBuilder { + c.addOn.scoreName = name + return c +} + +func (c *AddOnBuilder) Build() *AddOn { + c.addOn.prioritizerName = "AddOn" + "/" + c.addOn.resourceName + "/" + c.addOn.scoreName + return c.addOn +} + +func (c *AddOn) Name() string { + return c.prioritizerName +} + +func (c *AddOn) Description() string { + return description +} + +func (c *AddOn) Score(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) { + scores := map[string]int64{} + expiredScores := "" + status := framework.NewStatus(c.Name(), framework.Success, "") + + for _, cluster := range clusters { + namespace := cluster.Name + // default score is 0 + scores[cluster.Name] = 0 + + // get AddOnPlacementScores CR with resourceName + addOnScores, err := c.handle.ScoreLister().AddOnPlacementScores(namespace).Get(c.resourceName) + if err != nil { + klog.Warningf("Getting AddOnPlacementScores failed: %s", err) + continue + } + + // check score valid time + if (addOnScores.Status.ValidUntil != nil) && AddOnClock.Now().After(addOnScores.Status.ValidUntil.Time) { + expiredScores = fmt.Sprintf("%s %s/%s", expiredScores, namespace, c.resourceName) + continue + } + + // get AddOnPlacementScores score with scoreName + for _, v := range addOnScores.Status.Scores { + if v.Name == c.scoreName { + scores[cluster.Name] = int64(v.Value) + } + } + } + + 
if len(expiredScores) > 0 { + status = framework.NewStatus( + c.Name(), + framework.Warning, + fmt.Sprintf("AddOnPlacementScores%s expired", expiredScores), + ) + } + + return plugins.PluginScoreResult{ + Scores: scores, + }, status +} + +func (c *AddOn) RequeueAfter(ctx context.Context, placement *clusterapiv1beta1.Placement) (plugins.PluginRequeueResult, *framework.Status) { + return plugins.PluginRequeueResult{}, framework.NewStatus(c.Name(), framework.Success, "") +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/balance/balance.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/balance/balance.go new file mode 100644 index 000000000..63f4e9f28 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/balance/balance.go @@ -0,0 +1,90 @@ +package balance + +import ( + "context" + "reflect" + + "k8s.io/apimachinery/pkg/labels" + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" + "open-cluster-management.io/placement/pkg/plugins" +) + +const ( + placementLabel = "cluster.open-cluster-management.io/placement" + description = ` + Balance prioritizer balance the number of decisions among the clusters. The cluster + with the highest number of decison is given the lowest score, while the empty cluster is given + the highest score. 
+ ` +) + +var _ plugins.Prioritizer = &Balance{} + +type Balance struct { + handle plugins.Handle +} + +func New(handle plugins.Handle) *Balance { + return &Balance{ + handle: handle, + } +} + +func (b *Balance) Name() string { + return reflect.TypeOf(*b).Name() +} + +func (b *Balance) Description() string { + return description +} + +func (b *Balance) Score(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) { + scores := map[string]int64{} + for _, cluster := range clusters { + scores[cluster.Name] = plugins.MaxClusterScore + } + + decisions, err := b.handle.DecisionLister().List(labels.Everything()) + if err != nil { + return plugins.PluginScoreResult{}, framework.NewStatus( + b.Name(), + framework.Error, + err.Error(), + ) + } + + var maxCount int64 + decisionCount := map[string]int64{} + for _, decision := range decisions { + // Do not count the decision that is being scheduled. + if decision.Labels[placementLabel] == placement.Name && decision.Namespace == placement.Namespace { + continue + } + for _, d := range decision.Status.Decisions { + decisionCount[d.ClusterName] = decisionCount[d.ClusterName] + 1 + if decisionCount[d.ClusterName] > maxCount { + maxCount = decisionCount[d.ClusterName] + } + } + } + + for clusterName := range scores { + if count, ok := decisionCount[clusterName]; ok { + usage := float64(count) / float64(maxCount) + + // Negate the usage and substracted by 0.5, then we double it and muliply by maxCount, + // which normalize the score to value between 100 and -100 + scores[clusterName] = 2 * int64(float64(plugins.MaxClusterScore)*(0.5-usage)) + } + } + + return plugins.PluginScoreResult{ + Scores: scores, + }, framework.NewStatus(b.Name(), framework.Success, "") +} + +func (b *Balance) RequeueAfter(ctx context.Context, placement *clusterapiv1beta1.Placement) (plugins.PluginRequeueResult, *framework.Status) { + return 
plugins.PluginRequeueResult{}, framework.NewStatus(b.Name(), framework.Success, "") +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/interface.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/interface.go new file mode 100644 index 000000000..85fb113e1 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/interface.go @@ -0,0 +1,91 @@ +package plugins + +import ( + "context" + "math" + "time" + + "k8s.io/client-go/tools/events" + clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + clusterlisterv1alpha1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1alpha1" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" +) + +const ( + // MaxClusterScore is the maximum score a Prioritizer plugin is expected to return. + MaxClusterScore int64 = 100 + + // MinClusterScore is the minimum score a Prioritizer plugin is expected to return. + MinClusterScore int64 = -100 + + // MaxTotalScore is the maximum total score. + MaxTotalScore int64 = math.MaxInt64 +) + +// Plugin is the parent type for all the scheduling plugins. +type Plugin interface { + Name() string + // Set is to set the placement for the current scheduling. + Description() string + // RequeueAfter returns the requeue time interval of the placement + RequeueAfter(ctx context.Context, placement *clusterapiv1beta1.Placement) (PluginRequeueResult, *framework.Status) +} + +// Fitler defines a filter plugin that filter unsatisfied cluster. +type Filter interface { + Plugin + + // Filter returns a list of clusters satisfying the certain condition. 
+ Filter(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (PluginFilterResult, *framework.Status) +} + +// Prioritizer defines a prioritizer plugin that score each cluster. The score is normalized +// as a floating betwween 0 and 1. +type Prioritizer interface { + Plugin + + // Score gives the score to a list of the clusters, it returns a map with the key as + // the cluster name. + Score(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (PluginScoreResult, *framework.Status) +} + +// Handle provides data and some tools that plugins can use. It is +// passed to the plugin factories at the time of plugin initialization. +type Handle interface { + // DecisionLister lists all decisions + DecisionLister() clusterlisterv1beta1.PlacementDecisionLister + + // ScoreLister lists all AddOnPlacementScores + ScoreLister() clusterlisterv1alpha1.AddOnPlacementScoreLister + + // ClusterLister lists all ManagedClusters + ClusterLister() clusterlisterv1.ManagedClusterLister + + // ClusterClient returns the cluster client + ClusterClient() clusterclient.Interface + + // EventRecorder returns an event recorder. + EventRecorder() events.EventRecorder +} + +// PluginFilterResult contains the details of a filter plugin result. +type PluginFilterResult struct { + // Filtered contains the filtered ManagedCluster. + Filtered []*clusterapiv1.ManagedCluster +} + +// PluginScoreResult contains the details of a score plugin result. +type PluginScoreResult struct { + // Scores contains the ManagedCluster scores. + Scores map[string]int64 +} + +// PluginRequeueResult contains the requeue result of a placement. +type PluginRequeueResult struct { + // RequeueTime contains the expect requeue time. 
+ RequeueTime *time.Time +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/predicate/predicate.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/predicate/predicate.go new file mode 100644 index 000000000..4e86a806d --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/predicate/predicate.go @@ -0,0 +1,137 @@ +package predicate + +import ( + "context" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" + "open-cluster-management.io/placement/pkg/plugins" +) + +var _ plugins.Filter = &Predicate{} + +const description = "Predicate filter filters the clusters based on predicate defined in placement" + +type Predicate struct{} + +type predicateSelector struct { + labelSelector labels.Selector + claimSelector labels.Selector +} + +func New(handle plugins.Handle) *Predicate { + return &Predicate{} +} + +func (p *Predicate) Name() string { + return reflect.TypeOf(*p).Name() +} + +func (p *Predicate) Description() string { + return description +} + +func (p *Predicate) Filter( + ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginFilterResult, *framework.Status) { + status := framework.NewStatus(p.Name(), framework.Success, "") + + if len(placement.Spec.Predicates) == 0 { + return plugins.PluginFilterResult{ + Filtered: clusters, + }, status + } + if len(clusters) == 0 { + return plugins.PluginFilterResult{ + Filtered: clusters, + }, status + } + + // prebuild label/claim selectors for each predicate + predicateSelectors := []predicateSelector{} + for _, predicate := range placement.Spec.Predicates { + // build label selector + labelSelector, err := 
convertLabelSelector(predicate.RequiredClusterSelector.LabelSelector) + if err != nil { + return plugins.PluginFilterResult{}, framework.NewStatus( + p.Name(), + framework.Misconfigured, + err.Error(), + ) + } + // build claim selector + claimSelector, err := convertClaimSelector(predicate.RequiredClusterSelector.ClaimSelector) + if err != nil { + return plugins.PluginFilterResult{}, framework.NewStatus( + p.Name(), + framework.Misconfigured, + err.Error(), + ) + } + predicateSelectors = append(predicateSelectors, predicateSelector{ + labelSelector: labelSelector, + claimSelector: claimSelector, + }) + } + + // match cluster with selectors one by one + matched := []*clusterapiv1.ManagedCluster{} + for _, cluster := range clusters { + claims := getClusterClaims(cluster) + for _, ps := range predicateSelectors { + // match with label selector + if ok := ps.labelSelector.Matches(labels.Set(cluster.Labels)); !ok { + continue + } + // match with claim selector + if ok := ps.claimSelector.Matches(labels.Set(claims)); !ok { + continue + } + matched = append(matched, cluster) + break + } + } + + return plugins.PluginFilterResult{ + Filtered: matched, + }, status +} + +func (p *Predicate) RequeueAfter(ctx context.Context, placement *clusterapiv1beta1.Placement) (plugins.PluginRequeueResult, *framework.Status) { + return plugins.PluginRequeueResult{}, framework.NewStatus(p.Name(), framework.Success, "") +} + +// getClusterClaims returns a map containing cluster claims from the status of cluster +func getClusterClaims(cluster *clusterapiv1.ManagedCluster) map[string]string { + claims := map[string]string{} + for _, claim := range cluster.Status.ClusterClaims { + claims[claim.Name] = claim.Value + } + return claims +} + +// convertLabelSelector converts metav1.LabelSelector to labels.Selector +func convertLabelSelector(labelSelector metav1.LabelSelector) (labels.Selector, error) { + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return 
labels.Nothing(), err + } + + return selector, nil +} + +// convertClaimSelector converts ClusterClaimSelector to labels.Selector +func convertClaimSelector(clusterClaimSelector clusterapiv1beta1.ClusterClaimSelector) (labels.Selector, error) { + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchExpressions: clusterClaimSelector.MatchExpressions, + }) + if err != nil { + return labels.Nothing(), err + } + + return selector, nil +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/resource/resource.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/resource/resource.go new file mode 100644 index 000000000..1d6e97ae1 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/resource/resource.go @@ -0,0 +1,164 @@ +package resource + +import ( + "context" + "fmt" + "regexp" + "sort" + + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" + "open-cluster-management.io/placement/pkg/plugins" +) + +const ( + placementLabel = clusterapiv1beta1.PlacementLabel + description = ` + ResourceAllocatableCPU and ResourceAllocatableMemory prioritizer makes the scheduling + decisions based on the resource allocatable of managed clusters. + The clusters that has the most allocatable are given the highest score, + while the least is given the lowest score. 
+ ` +) + +var _ plugins.Prioritizer = &ResourcePrioritizer{} + +var resourceMap = map[string]clusterapiv1.ResourceName{ + "CPU": clusterapiv1.ResourceCPU, + "Memory": clusterapiv1.ResourceMemory, +} + +type ResourcePrioritizer struct { + handle plugins.Handle + prioritizerName string + algorithm string + resource clusterapiv1.ResourceName +} + +type ResourcePrioritizerBuilder struct { + resourcePrioritizer *ResourcePrioritizer +} + +func NewResourcePrioritizerBuilder(handle plugins.Handle) *ResourcePrioritizerBuilder { + return &ResourcePrioritizerBuilder{ + resourcePrioritizer: &ResourcePrioritizer{ + handle: handle, + }, + } +} + +func (r *ResourcePrioritizerBuilder) WithPrioritizerName(name string) *ResourcePrioritizerBuilder { + r.resourcePrioritizer.prioritizerName = name + return r +} + +func (r *ResourcePrioritizerBuilder) Build() *ResourcePrioritizer { + algorithm, resource := parsePrioritizerName(r.resourcePrioritizer.prioritizerName) + r.resourcePrioritizer.algorithm = algorithm + r.resourcePrioritizer.resource = resource + return r.resourcePrioritizer +} + +// parese prioritizerName to algorithm and resource. +// For example, prioritizerName ResourceAllocatableCPU will return Allocatable, CPU. 
+func parsePrioritizerName(prioritizerName string) (algorithm string, resource clusterapiv1.ResourceName) { + s := regexp.MustCompile("[A-Z]+[a-z]*").FindAllString(prioritizerName, -1) + if len(s) == 3 { + return s[1], resourceMap[s[2]] + } + return "", "" +} + +func (r *ResourcePrioritizer) Name() string { + return r.prioritizerName +} + +func (r *ResourcePrioritizer) Description() string { + return description +} + +func (r *ResourcePrioritizer) Score(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) { + status := framework.NewStatus(r.Name(), framework.Success, "") + if r.algorithm == "Allocatable" { + return mostResourceAllocatableScores(r.resource, clusters), status + } + return plugins.PluginScoreResult{}, status +} + +func (r *ResourcePrioritizer) RequeueAfter(ctx context.Context, placement *clusterapiv1beta1.Placement) (plugins.PluginRequeueResult, *framework.Status) { + return plugins.PluginRequeueResult{}, framework.NewStatus(r.Name(), framework.Success, "") +} + +// Calculate clusters scores based on the resource allocatable. +// The clusters that has the most allocatable are given the highest score, while the least is given the lowest score. +// The score range is from -100 to 100. 
+func mostResourceAllocatableScores(resourceName clusterapiv1.ResourceName, clusters []*clusterapiv1.ManagedCluster) plugins.PluginScoreResult { + scores := map[string]int64{} + + // get resourceName's min and max allocatable among all the clusters + minAllocatable, maxAllocatable, err := getClustersMinMaxAllocatableResource(clusters, resourceName) + if err != nil { + return plugins.PluginScoreResult{ + Scores: scores, + } + } + + for _, cluster := range clusters { + // get one cluster resourceName's allocatable + allocatable, _, err := getClusterResource(cluster, resourceName) + if err != nil { + continue + } + + // score = ((resource_x_allocatable - min(resource_x_allocatable)) / (max(resource_x_allocatable) - min(resource_x_allocatable)) - 0.5) * 2 * 100 + if (maxAllocatable - minAllocatable) != 0 { + ratio := float64(allocatable-minAllocatable) / float64(maxAllocatable-minAllocatable) + scores[cluster.Name] = int64((ratio - 0.5) * 2.0 * 100.0) + } else { + scores[cluster.Name] = 100.0 + } + } + + return plugins.PluginScoreResult{ + Scores: scores, + } +} + +// Go through one cluster resources and return the allocatable and capacity of the resourceName. +func getClusterResource(cluster *clusterapiv1.ManagedCluster, resourceName clusterapiv1.ResourceName) (allocatable, capacity float64, err error) { + if v, exist := cluster.Status.Allocatable[resourceName]; exist { + allocatable = v.AsApproximateFloat64() + } else { + return allocatable, capacity, fmt.Errorf("no allocatable %s found in cluster %s", resourceName, cluster.ObjectMeta.Name) + } + + if v, exist := cluster.Status.Capacity[resourceName]; exist { + capacity = v.AsApproximateFloat64() + } else { + return allocatable, capacity, fmt.Errorf("no capacity %s found in cluster %s", resourceName, cluster.ObjectMeta.Name) + } + + return allocatable, capacity, nil +} + +// Go through all the cluster resources and return the min and max allocatable value of the resourceName. 
+func getClustersMinMaxAllocatableResource(clusters []*clusterapiv1.ManagedCluster, resourceName clusterapiv1.ResourceName) (minAllocatable, maxAllocatable float64, err error) { + allocatable := sort.Float64Slice{} + + // get allocatable resources + for _, cluster := range clusters { + if alloc, _, err := getClusterResource(cluster, resourceName); err == nil { + allocatable = append(allocatable, alloc) + } + } + + // return err if no allocatable resource + if len(allocatable) == 0 { + return 0, 0, fmt.Errorf("no allocatable %s found in clusters", resourceName) + } + + // sort to get min and max + sort.Float64s(allocatable) + return allocatable[0], allocatable[len(allocatable)-1], nil +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/steady/steady.go b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/steady/steady.go new file mode 100644 index 000000000..83af9b6df --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/steady/steady.go @@ -0,0 +1,92 @@ +package steady + +import ( + "context" + "reflect" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" + "open-cluster-management.io/placement/pkg/plugins" +) + +const ( + placementLabel = "cluster.open-cluster-management.io/placement" + description = ` + Steady prioritizer ensure the existing decision is stabilized. The clusters that existing decisions + choose are given the highest score while the clusters with no existing decisions are given the lowest + score. 
+ ` +) + +var _ plugins.Prioritizer = &Steady{} + +type Steady struct { + handle plugins.Handle +} + +func New(handle plugins.Handle) *Steady { + return &Steady{ + handle: handle, + } +} + +func (s *Steady) Name() string { + return reflect.TypeOf(*s).Name() +} + +func (s *Steady) Description() string { + return description +} + +func (s *Steady) Score( + ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) { + // query placementdecisions with label selector + scores := map[string]int64{} + requirement, err := labels.NewRequirement(placementLabel, selection.Equals, []string{placement.Name}) + + if err != nil { + return plugins.PluginScoreResult{}, framework.NewStatus( + s.Name(), + framework.Error, + err.Error(), + ) + } + + labelSelector := labels.NewSelector().Add(*requirement) + decisions, err := s.handle.DecisionLister().PlacementDecisions(placement.Namespace).List(labelSelector) + + if err != nil { + return plugins.PluginScoreResult{}, framework.NewStatus( + s.Name(), + framework.Error, + err.Error(), + ) + } + + existingDecisions := sets.String{} + for _, decision := range decisions { + for _, d := range decision.Status.Decisions { + existingDecisions.Insert(d.ClusterName) + } + } + + for _, cluster := range clusters { + if existingDecisions.Has(cluster.Name) { + scores[cluster.Name] = plugins.MaxClusterScore + } else { + scores[cluster.Name] = 0 + } + } + + return plugins.PluginScoreResult{ + Scores: scores, + }, framework.NewStatus(s.Name(), framework.Success, "") +} + +func (s *Steady) RequeueAfter(ctx context.Context, placement *clusterapiv1beta1.Placement) (plugins.PluginRequeueResult, *framework.Status) { + return plugins.PluginRequeueResult{}, framework.NewStatus(s.Name(), framework.Success, "") +} diff --git a/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/tainttoleration/taint_toleration.go 
b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/tainttoleration/taint_toleration.go new file mode 100644 index 000000000..e856ec734 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/placement/pkg/plugins/tainttoleration/taint_toleration.go @@ -0,0 +1,261 @@ +package tainttoleration + +import ( + "context" + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + "k8s.io/utils/clock" + clusterapiv1 "open-cluster-management.io/api/cluster/v1" + clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/placement/pkg/controllers/framework" + "open-cluster-management.io/placement/pkg/plugins" +) + +var _ plugins.Filter = &TaintToleration{} +var TolerationClock = (clock.Clock)(clock.RealClock{}) + +const ( + placementLabel = "cluster.open-cluster-management.io/placement" + description = "TaintToleration is a plugin that checks if a placement tolerates a managed cluster's taints" +) + +type TaintToleration struct { + handle plugins.Handle +} + +func New(handle plugins.Handle) *TaintToleration { + return &TaintToleration{ + handle: handle, + } +} + +func (p *TaintToleration) Name() string { + return reflect.TypeOf(*p).Name() +} + +func (pl *TaintToleration) Description() string { + return description +} + +func (pl *TaintToleration) Filter(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginFilterResult, *framework.Status) { + status := framework.NewStatus(pl.Name(), framework.Success, "") + + if len(clusters) == 0 { + return plugins.PluginFilterResult{ + Filtered: clusters, + }, status + } + + // do validation on each toleration and return error if necessary + for _, toleration := range placement.Spec.Tolerations { + if len(toleration.Key) == 0 && toleration.Operator != clusterapiv1beta1.TolerationOpExists { + return 
plugins.PluginFilterResult{}, framework.NewStatus( + pl.Name(), + framework.Misconfigured, + "If the key is empty, operator must be Exists.", + ) + } + if toleration.Operator == clusterapiv1beta1.TolerationOpExists && len(toleration.Value) > 0 { + return plugins.PluginFilterResult{}, framework.NewStatus( + pl.Name(), + framework.Misconfigured, + "If the operator is Exists, the value should be empty.", + ) + } + } + + decisionClusterNames := getDecisionClusterNames(pl.handle, placement) + + // filter the clusters + matched := []*clusterapiv1.ManagedCluster{} + for _, cluster := range clusters { + if tolerated, _, _ := isClusterTolerated(cluster, placement.Spec.Tolerations, decisionClusterNames.Has(cluster.Name)); tolerated { + matched = append(matched, cluster) + } + } + + return plugins.PluginFilterResult{ + Filtered: matched, + }, status +} + +func (pl *TaintToleration) RequeueAfter(ctx context.Context, placement *clusterapiv1beta1.Placement) (plugins.PluginRequeueResult, *framework.Status) { + status := framework.NewStatus(pl.Name(), framework.Success, "") + // get exist decisions clusters + decisionClusterNames, decisionClusters := getDecisionClusters(pl.handle, placement) + if decisionClusterNames == nil || decisionClusters == nil { + return plugins.PluginRequeueResult{}, status + } + + var minRequeue *plugins.PluginRequeueResult + // filter and record pluginRequeueResults + for _, cluster := range decisionClusters { + if tolerated, requeue, msg := isClusterTolerated(cluster, placement.Spec.Tolerations, decisionClusterNames.Has(cluster.Name)); tolerated { + minRequeue = minRequeueTime(minRequeue, requeue) + } else { + status.AppendReason(msg) + } + } + + if minRequeue == nil { + return plugins.PluginRequeueResult{}, status + } + + return *minRequeue, status +} + +// isClusterTolerated returns true if a cluster is tolerated by the given toleration array +func isClusterTolerated(cluster *clusterapiv1.ManagedCluster, tolerations []clusterapiv1beta1.Toleration, 
inDecision bool) (bool, *plugins.PluginRequeueResult, string) { + var minRequeue *plugins.PluginRequeueResult + for _, taint := range cluster.Spec.Taints { + tolerated, requeue, message := isTaintTolerated(taint, tolerations, inDecision) + if !tolerated { + return false, nil, message + } + minRequeue = minRequeueTime(minRequeue, requeue) + } + + return true, minRequeue, "" +} + +// isTaintTolerated returns true if a taint is tolerated by the given toleration array +func isTaintTolerated(taint clusterapiv1.Taint, tolerations []clusterapiv1beta1.Toleration, inDecision bool) (bool, *plugins.PluginRequeueResult, string) { + message := "" + if taint.Effect == clusterapiv1.TaintEffectPreferNoSelect { + return true, nil, message + } + + if (taint.Effect == clusterapiv1.TaintEffectNoSelectIfNew) && inDecision { + return true, nil, message + } + + for _, toleration := range tolerations { + if tolerated, requeue, msg := isTolerated(taint, toleration); tolerated { + return true, requeue, msg + } else { + message = msg + } + } + + return false, nil, message +} + +// isTolerated returns true if a taint is tolerated by the given toleration +func isTolerated(taint clusterapiv1.Taint, toleration clusterapiv1beta1.Toleration) (bool, *plugins.PluginRequeueResult, string) { + if len(toleration.Effect) > 0 && toleration.Effect != taint.Effect { + return false, nil, "" + } + + if len(toleration.Key) > 0 && toleration.Key != taint.Key { + return false, nil, "" + } + + taintMatched := false + switch toleration.Operator { + // empty operator means Equal + case "", clusterapiv1beta1.TolerationOpEqual: + taintMatched = (toleration.Value == taint.Value) + case clusterapiv1beta1.TolerationOpExists: + taintMatched = true + } + + if taintMatched { + return isTolerationTimeExpired(taint, toleration) + } + + return false, nil, "" + +} + +// isTolerationTimeExpired returns true if TolerationSeconds is nil or not expired +func isTolerationTimeExpired(taint clusterapiv1.Taint, toleration 
clusterapiv1beta1.Toleration) (bool, *plugins.PluginRequeueResult, string) { + // TolerationSeconds is nil means it never expire + if toleration.TolerationSeconds == nil { + return true, nil, "" + } + + requeueTime := taint.TimeAdded.Add(time.Duration(*toleration.TolerationSeconds) * time.Second) + + if TolerationClock.Now().Before(requeueTime) { + message := fmt.Sprintf( + "Cluster %s taint is added at %v, placement toleration seconds is %d", + "clustername", + taint.TimeAdded, + *toleration.TolerationSeconds, + ) + p := plugins.PluginRequeueResult{ + RequeueTime: &requeueTime, + } + return true, &p, message + } + + return false, nil, "" +} + +func getDecisionClusterNames(handle plugins.Handle, placement *clusterapiv1beta1.Placement) sets.String { + existingDecisions := sets.String{} + + // query placementdecisions with label selector + requirement, err := labels.NewRequirement(placementLabel, selection.Equals, []string{placement.Name}) + if err != nil { + return existingDecisions + } + + labelSelector := labels.NewSelector().Add(*requirement) + decisions, err := handle.DecisionLister().PlacementDecisions(placement.Namespace).List(labelSelector) + if err != nil { + return existingDecisions + } + + for _, decision := range decisions { + for _, d := range decision.Status.Decisions { + existingDecisions.Insert(d.ClusterName) + } + } + + return existingDecisions +} + +func getDecisionClusters(handle plugins.Handle, placement *clusterapiv1beta1.Placement) (sets.String, []*clusterapiv1.ManagedCluster) { + // get existing decision cluster name + decisionClusterNames := getDecisionClusterNames(handle, placement) + + // get existing decision clusters + decisionClusters := []*clusterapiv1.ManagedCluster{} + for c := range decisionClusterNames { + if managedCluser, err := handle.ClusterLister().Get(c); err != nil { + klog.Warningf("Failed to get ManagedCluster: %s", err) + } else { + decisionClusters = append(decisionClusters, managedCluser) + } + } + + return 
decisionClusterNames, decisionClusters +} + +// return the PluginRequeueResult with minimal requeue time +func minRequeueTime(x, y *plugins.PluginRequeueResult) *plugins.PluginRequeueResult { + if x == nil { + return y + } + if y == nil { + return x + } + + t1 := x.RequeueTime + t2 := y.RequeueTime + if t1 == nil || t2 == nil { + return nil + } + if t1.Before(*t2) { + return x + } else { + return y + } +} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/addon/discovery_controller.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/addon/discovery_controller.go index 5208f1e55..bee52bf01 100644 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/addon/discovery_controller.go +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/addon/discovery_controller.go @@ -128,6 +128,10 @@ func (c *addOnFeatureDiscoveryController) syncAddOn(ctx context.Context, cluster } cluster, err := c.clusterLister.Get(clusterName) + if errors.IsNotFound(err) { + // no cluster, it could be deleted + return nil + } if err != nil { return fmt.Errorf("unable to find cluster with name %q: %w", clusterName, err) } diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/addon/healthcheck_controller.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/addon/healthcheck_controller.go index 063b3d762..4a6ba7b3b 100644 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/addon/healthcheck_controller.go +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/addon/healthcheck_controller.go @@ -4,6 +4,7 @@ import ( "context" operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonclient "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformerv1alpha1 
"open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" @@ -22,8 +23,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -const addOnAvailableConditionType = "Available" //TODO add this to ManagedClusterAddOn api - // managedClusterAddonHealthCheckController udpates managed cluster addons status through watching the managed cluster status on // the hub cluster. type managedClusterAddOnHealthCheckController struct { @@ -95,7 +94,7 @@ func (c *managedClusterAddOnHealthCheckController) sync(ctx context.Context, syn addOn.Namespace, addOn.Name, helpers.UpdateManagedClusterAddOnStatusFn(metav1.Condition{ - Type: addOnAvailableConditionType, + Type: addonv1alpha1.ManagedClusterAddOnConditionAvailable, Status: managedClusterAvailableCondition.Status, Reason: managedClusterAvailableCondition.Reason, Message: managedClusterAvailableCondition.Message, diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/clusterrole/manifests/managedcluster-registration-clusterrole.yaml b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/clusterrole/manifests/managedcluster-registration-clusterrole.yaml index cd945d237..dc58e1c51 100644 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/clusterrole/manifests/managedcluster-registration-clusterrole.yaml +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/clusterrole/manifests/managedcluster-registration-clusterrole.yaml @@ -6,9 +6,7 @@ rules: # Allow spoke registration agent to get/update coordination.k8s.io/lease - apiGroups: ["coordination.k8s.io"] resources: ["leases"] - #TODO: for backward compatible, we do not limit the resource name in release 2.3. - #After release 2.3, we will limit the resource name. 
- #resourceNames: ["managed-cluster-lease"] + resourceNames: ["managed-cluster-lease"] verbs: ["get", "update"] # Allow agent to get/list/watch managed cluster addons - apiGroups: ["addon.open-cluster-management.io"] diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedcluster/controller.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedcluster/controller.go index ef754e23b..e81363c0a 100644 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedcluster/controller.go +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedcluster/controller.go @@ -140,9 +140,8 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn return err } - // TODO: we will add the managedcluster-namespace.yaml back to staticFiles - // in next release, currently, we need keep the namespace after the managed - // cluster is deleted. + // TODO consider to add the managedcluster-namespace.yaml back to staticFiles, + // currently, we keep the namespace after the managed cluster is deleted. applyFiles := []string{"manifests/managedcluster-namespace.yaml"} applyFiles = append(applyFiles, staticFiles...) 
diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedclusterset/controller.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedclusterset/controller.go index 5498957b3..9c9c249eb 100644 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedclusterset/controller.go +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedclusterset/controller.go @@ -58,7 +58,7 @@ func NewManagedClusterSetController( utilruntime.HandleError(fmt.Errorf("error to get object: %v", obj)) return } - //enqueue all cluster related clustersets + // enqueue all cluster related clustersets c.enqueueClusterClusterSet(cluster) }, UpdateFunc: func(oldObj, newObj interface{}) { @@ -81,7 +81,7 @@ func NewManagedClusterSetController( DeleteFunc: func(obj interface{}) { switch t := obj.(type) { case *v1.ManagedCluster: - //enqueue all cluster related clustersets + // enqueue all cluster related clustersets c.enqueueClusterClusterSet(t) case cache.DeletedFinalStateUnknown: cluster, ok := t.Obj.(*v1.ManagedCluster) @@ -89,7 +89,7 @@ func NewManagedClusterSetController( utilruntime.HandleError(fmt.Errorf("error to get object: %v", obj)) return } - //enqueue all cluster related clustersets + // enqueue all cluster related clustersets c.enqueueClusterClusterSet(cluster) default: utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) @@ -113,7 +113,7 @@ func (c *managedClusterSetController) sync(ctx context.Context, syncCtx factory. if len(clusterSetName) == 0 { return nil } - klog.Infof("Reconciling ManagedClusterSet %s", clusterSetName) + klog.V(4).Infof("Reconciling ManagedClusterSet %s", clusterSetName) clusterSet, err := c.clusterSetLister.Get(clusterSetName) if errors.IsNotFound(err) { // cluster set not found, could have been deleted, do nothing. 
@@ -171,7 +171,7 @@ func (c *managedClusterSetController) syncClusterSet(ctx context.Context, origin return nil } -//enqueueClusterClusterSet enqueue a cluster related clusterset +// enqueueClusterClusterSet enqueue a cluster related clusterset func (c *managedClusterSetController) enqueueClusterClusterSet(cluster *v1.ManagedCluster) { clusterSets, err := clusterv1beta1.GetClusterSetsOfCluster(cluster, c.clusterSetLister) if err != nil { @@ -183,8 +183,8 @@ func (c *managedClusterSetController) enqueueClusterClusterSet(cluster *v1.Manag } } -//enqueueUpdateClusterClusterSet get the oldCluster related clustersets and newCluster related clustersets, -//then enqueue the diff clustersets(added clustersets and removed clustersets) +// enqueueUpdateClusterClusterSet get the oldCluster related clustersets and newCluster related clustersets, +// then enqueue the diff clustersets(added clustersets and removed clustersets) func (c *managedClusterSetController) enqueueUpdateClusterClusterSet(oldCluster, newCluster *v1.ManagedCluster) { oldClusterSets, err := clusterv1beta1.GetClusterSetsOfCluster(oldCluster, c.clusterSetLister) if err != nil { @@ -203,7 +203,7 @@ func (c *managedClusterSetController) enqueueUpdateClusterClusterSet(oldCluster, } } -//getDiffClusterSetsNames return the diff clustersets names +// getDiffClusterSetsNames return the diff clustersets names func getDiffClusterSetsNames(oldSets, newSets []*clusterv1beta1.ManagedClusterSet) sets.String { oldSetsMap := sets.NewString() newSetsMap := sets.NewString() diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedclustersetbinding/controller.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedclustersetbinding/controller.go index d43ce7e5c..aeb72d1d9 100644 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedclustersetbinding/controller.go +++ 
b/controlplane/vendor/open-cluster-management.io/registration/pkg/hub/managedclustersetbinding/controller.go @@ -138,7 +138,7 @@ func (c *managedClusterSetBindingController) sync(ctx context.Context, syncCtx f return nil } - klog.Infof("Reconciling ManagedClusterSetBinding %s", key) + klog.V(4).Infof("Reconciling ManagedClusterSetBinding %s", key) bindingNamespace, bindingName, err := cache.SplitMetaNamespaceKey(key) if err != nil { diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/cluster/mutating_webhook.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/cluster/mutating_webhook.go deleted file mode 100644 index 295d5a91a..000000000 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/cluster/mutating_webhook.go +++ /dev/null @@ -1,253 +0,0 @@ -package cluster - -import ( - "encoding/json" - "fmt" - "net/http" - ocmfeature "open-cluster-management.io/api/feature" - "strings" - "time" - - admissionv1beta1 "k8s.io/api/admission/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/rest" - "k8s.io/klog/v2" - clusterv1 "open-cluster-management.io/api/cluster/v1" - clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" - "open-cluster-management.io/registration/pkg/helpers" -) - -var nowFunc = time.Now -var defaultClusterSetName = "default" - -type jsonPatchOperation struct { - Operation string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value,omitempty"` -} - -// ManagedClusterMutatingAdmissionHook will mutate the creating/updating managedcluster request. -type ManagedClusterMutatingAdmissionHook struct{} - -// MutatingResource is called by generic-admission-server on startup to register the returned REST resource through which the -// webhook is accessed by the kube apiserver. 
-func (a *ManagedClusterMutatingAdmissionHook) MutatingResource() (schema.GroupVersionResource, string) { - return schema.GroupVersionResource{ - Group: "admission.cluster.open-cluster-management.io", - Version: "v1", - Resource: "managedclustermutators", - }, - "managedclustermutators" -} - -// Admit is called by generic-admission-server when the registered REST resource above is called with an admission request. -func (a *ManagedClusterMutatingAdmissionHook) Admit(req *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { - klog.V(4).Infof("mutate %q operation for object %q", req.Operation, req.Object) - - status := &admissionv1beta1.AdmissionResponse{ - Allowed: true, - } - - // only mutate the request for managedcluster - if req.Resource.Group != "cluster.open-cluster-management.io" || - req.Resource.Resource != "managedclusters" { - return status - } - - // only mutate create and update operation - if req.Operation != admissionv1beta1.Create && req.Operation != admissionv1beta1.Update { - return status - } - - managedCluster := &clusterv1.ManagedCluster{} - if err := json.Unmarshal(req.Object.Raw, managedCluster); err != nil { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, - Message: err.Error(), - } - return status - } - - var jsonPatches []jsonPatchOperation - - // set timeAdded of taint if it is nil and reset it if it is modified - taintJsonPatches, status := a.processTaints(managedCluster, req.OldObject.Raw) - if !status.Allowed { - return status - } - jsonPatches = append(jsonPatches, taintJsonPatches...) - - if utilfeature.DefaultMutableFeatureGate.Enabled(ocmfeature.DefaultClusterSet) { - labelJsonPatches, status := a.addDefaultClusterSetLabel(managedCluster, req.Object.Raw) - if !status.Allowed { - return status - } - jsonPatches = append(jsonPatches, labelJsonPatches...) 
- } - - if len(jsonPatches) == 0 { - return status - } - - patch, err := json.Marshal(jsonPatches) - if err != nil { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusInternalServerError, Reason: metav1.StatusReasonInternalError, - Message: err.Error(), - } - return status - } - - status.Patch = patch - pt := admissionv1beta1.PatchTypeJSONPatch - status.PatchType = &pt - return status -} - -//addDefaultClusterSetLabel add label "cluster.open-cluster-management.io/clusterset:default" for ManagedCluster if the managedCluster has no ManagedClusterSet label -func (a *ManagedClusterMutatingAdmissionHook) addDefaultClusterSetLabel(managedCluster *clusterv1.ManagedCluster, clusterObj []byte) ([]jsonPatchOperation, *admissionv1beta1.AdmissionResponse) { - var jsonPatches []jsonPatchOperation - - status := &admissionv1beta1.AdmissionResponse{ - Allowed: true, - } - - if len(managedCluster.Labels) == 0 { - jsonPatches = []jsonPatchOperation{ - { - Operation: "add", - Path: "/metadata/labels", - Value: map[string]string{ - clusterv1beta1.ClusterSetLabel: defaultClusterSetName, - }, - }, - } - return jsonPatches, status - } - - clusterSetName, ok := managedCluster.Labels[clusterv1beta1.ClusterSetLabel] - // Clusterset label do not exist - if !ok { - jsonPatches = []jsonPatchOperation{ - { - Operation: "add", - // there is a "/" in clusterset label. So need to transfer the "/" to "~1". - Path: "/metadata/labels/cluster.open-cluster-management.io~1clusterset", - Value: defaultClusterSetName, - }, - } - return jsonPatches, status - } - - // The clusterset label's value is "", update it to "default" - if len(clusterSetName) == 0 { - jsonPatches = []jsonPatchOperation{ - { - Operation: "replace", - // there is a "/" in clusterset label. So need to transfer the "/" to "~1". 
- Path: "/metadata/labels/cluster.open-cluster-management.io~1clusterset", - Value: defaultClusterSetName, - }, - } - return jsonPatches, status - } - - return nil, status -} - -// processTaints generates json patched for cluster taints -func (a *ManagedClusterMutatingAdmissionHook) processTaints(managedCluster *clusterv1.ManagedCluster, oldManagedClusterRaw []byte) ([]jsonPatchOperation, *admissionv1beta1.AdmissionResponse) { - status := &admissionv1beta1.AdmissionResponse{ - Allowed: true, - } - - if len(managedCluster.Spec.Taints) == 0 { - return nil, status - } - - var oldManagedCluster *clusterv1.ManagedCluster - if len(oldManagedClusterRaw) > 0 { - cluster := &clusterv1.ManagedCluster{} - if err := json.Unmarshal(oldManagedClusterRaw, cluster); err != nil { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusInternalServerError, Reason: metav1.StatusReasonInternalError, - Message: err.Error(), - } - return nil, status - } - oldManagedCluster = cluster - } - - var invalidTaints []string - var jsonPatches []jsonPatchOperation - now := metav1.NewTime(nowFunc()) - for index, taint := range managedCluster.Spec.Taints { - originalTaint := helpers.FindTaintByKey(oldManagedCluster, taint.Key) - switch { - case oldManagedCluster == nil: - // handle CREATE operation. - // The request will not be denied if it has taints with timeAdded specified, - // while the specified values will be ignored. - if !taint.TimeAdded.IsZero() { - status.Warnings = append(status.Warnings, - fmt.Sprintf("The specified TimeAdded value of Taint %q is ignored: %s.", taint.Key, taint.TimeAdded.UTC().Format(time.RFC3339))) - } - jsonPatches = append(jsonPatches, newTaintTimeAddedJsonPatch(index, now.Time)) - case originalTaint == nil: - // handle UPDATE operation. - // new taint - // The request will be denied if it has any taint with timeAdded specified. 
- if !taint.TimeAdded.IsZero() { - invalidTaints = append(invalidTaints, taint.Key) - continue - } - jsonPatches = append(jsonPatches, newTaintTimeAddedJsonPatch(index, now.Time)) - case originalTaint.Value == taint.Value && originalTaint.Effect == taint.Effect: - // handle UPDATE operation. - // no change - // The request will be denied if it has any taint with different timeAdded specified. - if !originalTaint.TimeAdded.Equal(&taint.TimeAdded) { - invalidTaints = append(invalidTaints, taint.Key) - } - default: - // handle UPDATE operation. - // taint's value/effect has changed - // The request will be denied if it has any taint with timeAdded specified. - if !taint.TimeAdded.IsZero() { - invalidTaints = append(invalidTaints, taint.Key) - continue - } - jsonPatches = append(jsonPatches, newTaintTimeAddedJsonPatch(index, now.Time)) - } - } - - if len(invalidTaints) == 0 { - return jsonPatches, status - } - - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, - Message: fmt.Sprintf("It is not allowed to set TimeAdded of Taint %q.", strings.Join(invalidTaints, ",")), - } - return nil, status -} - -// Initialize is called by generic-admission-server on startup to setup initialization that managedclusters webhook needs. 
-func (a *ManagedClusterMutatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { - // do nothing - return nil -} - -func newTaintTimeAddedJsonPatch(index int, timeAdded time.Time) jsonPatchOperation { - return jsonPatchOperation{ - Operation: "replace", - Path: fmt.Sprintf("/spec/taints/%d/timeAdded", index), - Value: timeAdded.UTC().Format(time.RFC3339), - } -} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/cluster/validating_webhook.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/cluster/validating_webhook.go deleted file mode 100644 index 24ec3d27b..000000000 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/cluster/validating_webhook.go +++ /dev/null @@ -1,295 +0,0 @@ -package cluster - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - clusterv1 "open-cluster-management.io/api/cluster/v1" - "open-cluster-management.io/registration/pkg/helpers" - - operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers" - - admissionv1beta1 "k8s.io/api/admission/v1beta1" - authenticationv1 "k8s.io/api/authentication/v1" - authorizationv1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/klog/v2" - clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" -) - -// ManagedClusterValidatingAdmissionHook will validate the creating/updating managedcluster request. -type ManagedClusterValidatingAdmissionHook struct { - kubeClient kubernetes.Interface -} - -func (h *ManagedClusterValidatingAdmissionHook) SetKubeClient(c kubernetes.Interface) { - h.kubeClient = c -} - -// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the -// webhook is accessed by the kube apiserver. 
-func (a *ManagedClusterValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { - return schema.GroupVersionResource{ - Group: "admission.cluster.open-cluster-management.io", - Version: "v1", - Resource: "managedclustervalidators", - }, - "managedclustervalidators" -} - -// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. -func (a *ManagedClusterValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { - klog.V(4).Infof("validate %q operation for object %q", admissionSpec.Operation, admissionSpec.Object) - - status := &admissionv1beta1.AdmissionResponse{} - - // only validate the request for managedcluster - if admissionSpec.Resource.Group != "cluster.open-cluster-management.io" || - admissionSpec.Resource.Resource != "managedclusters" { - status.Allowed = true - return status - } - - switch admissionSpec.Operation { - case admissionv1beta1.Create: - return a.validateCreateRequest(admissionSpec) - case admissionv1beta1.Update: - return a.validateUpdateRequest(admissionSpec) - default: - status.Allowed = true - return status - } -} - -// Initialize is called by generic-admission-server on startup to setup initialization that managedclusters webhook needs. 
-func (a *ManagedClusterValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { - var err error - a.kubeClient, err = kubernetes.NewForConfig(kubeClientConfig) - return err -} - -// validateCreateRequest validates create managed cluster operation -func (a *ManagedClusterValidatingAdmissionHook) validateCreateRequest(request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { - status := &admissionv1beta1.AdmissionResponse{} - - // validate ManagedCluster object firstly - managedCluster, err := a.validateManagedClusterObj(request.Object) - if err != nil { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, - Message: err.Error(), - } - return status - } - - if managedCluster.Spec.HubAcceptsClient { - // the HubAcceptsClient field is changed, we need to check the request user whether - // has been allowed to change the HubAcceptsClient field with SubjectAccessReview api - if status := a.allowUpdateAcceptField(managedCluster.Name, request.UserInfo); !status.Allowed { - return status - } - } - - // check whether the request user has been allowed to set clusterset label - var clusterSetName string - if len(managedCluster.Labels) > 0 { - clusterSetName = managedCluster.Labels[clusterv1beta1.ClusterSetLabel] - } - - return a.allowSetClusterSetLabel(request.UserInfo, "", clusterSetName) -} - -// validateUpdateRequest validates update managed cluster operation. 
-func (a *ManagedClusterValidatingAdmissionHook) validateUpdateRequest(request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { - status := &admissionv1beta1.AdmissionResponse{} - - oldManagedCluster := &clusterv1.ManagedCluster{} - if err := json.Unmarshal(request.OldObject.Raw, oldManagedCluster); err != nil { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, - Message: err.Error(), - } - return status - } - - // validate the updating ManagedCluster object firstly - newManagedCluster, err := a.validateManagedClusterObj(request.Object) - if err != nil { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, - Message: err.Error(), - } - return status - } - - if newManagedCluster.Spec.HubAcceptsClient != oldManagedCluster.Spec.HubAcceptsClient { - // the HubAcceptsClient field is changed, we need to check the request user whether - // has been allowed to update the HubAcceptsClient field with SubjectAccessReview api - if status := a.allowUpdateAcceptField(newManagedCluster.Name, request.UserInfo); !status.Allowed { - return status - } - } - - // check whether the request user has been allowed to set clusterset label - var originalClusterSetName, currentClusterSetName string - if len(oldManagedCluster.Labels) > 0 { - originalClusterSetName = oldManagedCluster.Labels[clusterv1beta1.ClusterSetLabel] - } - if len(newManagedCluster.Labels) > 0 { - currentClusterSetName = newManagedCluster.Labels[clusterv1beta1.ClusterSetLabel] - } - - return a.allowSetClusterSetLabel(request.UserInfo, originalClusterSetName, currentClusterSetName) -} - -// validateManagedClusterObj validates the fileds of ManagedCluster object -func (a *ManagedClusterValidatingAdmissionHook) validateManagedClusterObj(requestObj runtime.RawExtension) 
(*clusterv1.ManagedCluster, error) { - errs := []error{} - - managedCluster := &clusterv1.ManagedCluster{} - if err := json.Unmarshal(requestObj.Raw, managedCluster); err != nil { - errs = append(errs, err) - } - - // there are no spoke client configs, finish the validation process - if len(managedCluster.Spec.ManagedClusterClientConfigs) == 0 { - return managedCluster, operatorhelpers.NewMultiLineAggregate(errs) - } - - // validate the url in spoke client configs - for _, clientConfig := range managedCluster.Spec.ManagedClusterClientConfigs { - if !helpers.IsValidHTTPSURL(clientConfig.URL) { - errs = append(errs, fmt.Errorf("url %q is invalid in client configs", clientConfig.URL)) - } - } - - return managedCluster, operatorhelpers.NewMultiLineAggregate(errs) -} - -// allowUpdateHubAcceptsClientField using SubjectAccessReview API to check whether a request user has been authorized to update -// HubAcceptsClient field -func (a *ManagedClusterValidatingAdmissionHook) allowUpdateAcceptField(clusterName string, userInfo authenticationv1.UserInfo) *admissionv1beta1.AdmissionResponse { - status := &admissionv1beta1.AdmissionResponse{} - - extra := make(map[string]authorizationv1.ExtraValue) - for k, v := range userInfo.Extra { - extra[k] = authorizationv1.ExtraValue(v) - } - - sar := &authorizationv1.SubjectAccessReview{ - Spec: authorizationv1.SubjectAccessReviewSpec{ - User: userInfo.Username, - UID: userInfo.UID, - Groups: userInfo.Groups, - Extra: extra, - ResourceAttributes: &authorizationv1.ResourceAttributes{ - Group: "register.open-cluster-management.io", - Resource: "managedclusters", - Verb: "update", - Subresource: "accept", - Name: clusterName, - }, - }, - } - sar, err := a.kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) - if err != nil { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusForbidden, Reason: metav1.StatusReasonForbidden, - Message: 
err.Error(), - } - return status - } - - if !sar.Status.Allowed { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusForbidden, Reason: metav1.StatusReasonForbidden, - Message: fmt.Sprintf("user %q cannot update the HubAcceptsClient field", userInfo.Username), - } - return status - } - - status.Allowed = true - return status -} - -// allowSetClusterSetLabel checks whether a request user has been authorized to set clusterset label -func (a *ManagedClusterValidatingAdmissionHook) allowSetClusterSetLabel(userInfo authenticationv1.UserInfo, originalClusterSet, newClusterSet string) *admissionv1beta1.AdmissionResponse { - if originalClusterSet == newClusterSet { - return &admissionv1beta1.AdmissionResponse{Allowed: true} - } - - if len(originalClusterSet) > 0 { - if status := a.allowUpdateClusterSet(userInfo, originalClusterSet); !status.Allowed { - return status - } - } - - if len(newClusterSet) > 0 { - if status := a.allowUpdateClusterSet(userInfo, newClusterSet); !status.Allowed { - return status - } - } - - return &admissionv1beta1.AdmissionResponse{ - Allowed: true, - } -} - -// allowUpdateClusterSet checks whether a request user has been authorized to add/remove a ManagedCluster -// to/from the ManagedClusterSet -func (a *ManagedClusterValidatingAdmissionHook) allowUpdateClusterSet(userInfo authenticationv1.UserInfo, clusterSetName string) *admissionv1beta1.AdmissionResponse { - status := &admissionv1beta1.AdmissionResponse{} - - extra := make(map[string]authorizationv1.ExtraValue) - for k, v := range userInfo.Extra { - extra[k] = authorizationv1.ExtraValue(v) - } - - sar := &authorizationv1.SubjectAccessReview{ - Spec: authorizationv1.SubjectAccessReviewSpec{ - User: userInfo.Username, - UID: userInfo.UID, - Groups: userInfo.Groups, - Extra: extra, - ResourceAttributes: &authorizationv1.ResourceAttributes{ - Group: "cluster.open-cluster-management.io", - Resource: "managedclustersets", - Subresource: 
"join", - Name: clusterSetName, - Verb: "create", - }, - }, - } - sar, err := a.kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) - if err != nil { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusForbidden, Reason: metav1.StatusReasonForbidden, - Message: err.Error(), - } - return status - } - - if !sar.Status.Allowed { - status.Allowed = false - status.Result = &metav1.Status{ - Status: metav1.StatusFailure, Code: http.StatusForbidden, Reason: metav1.StatusReasonForbidden, - Message: fmt.Sprintf("user %q cannot add/remove a ManagedCluster to/from ManagedClusterSet %q", userInfo.Username, clusterSetName), - } - return status - } - - status.Allowed = true - return status -} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/clustersetbinding/validating_webhook.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/clustersetbinding/validating_webhook.go deleted file mode 100644 index 3e49765fd..000000000 --- a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/clustersetbinding/validating_webhook.go +++ /dev/null @@ -1,134 +0,0 @@ -package clustersetbinding - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - admissionv1beta1 "k8s.io/api/admission/v1beta1" - authenticationv1 "k8s.io/api/authentication/v1" - authorizationv1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/klog/v2" - clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" -) - -// ManagedClusterSetBindingValidatingAdmissionHook will validate the creating/updating ManagedClusterSetBinding request. 
-type ManagedClusterSetBindingValidatingAdmissionHook struct { - kubeClient kubernetes.Interface -} - -func (h *ManagedClusterSetBindingValidatingAdmissionHook) SetKubeClient(c kubernetes.Interface) { - h.kubeClient = c -} - -// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the -// webhook is accessed by the kube apiserver. -func (a *ManagedClusterSetBindingValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { - return schema.GroupVersionResource{ - Group: "admission.cluster.open-cluster-management.io", - Version: "v1", - Resource: "managedclustersetbindingvalidators", - }, - "managedclustersetbindingvalidators" -} - -// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. -func (a *ManagedClusterSetBindingValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { - klog.V(4).Infof("validate %q operation for object %q", admissionSpec.Operation, admissionSpec.Object) - - // only validate the request for ManagedClusterSetBinding - if admissionSpec.Resource.Group != "cluster.open-cluster-management.io" || - admissionSpec.Resource.Resource != "managedclustersetbindings" { - return acceptRequest() - } - - // only handle Create/Update Operation - if admissionSpec.Operation != admissionv1beta1.Create && admissionSpec.Operation != admissionv1beta1.Update { - return acceptRequest() - } - - binding := &clusterv1beta1.ManagedClusterSetBinding{} - if err := json.Unmarshal(admissionSpec.Object.Raw, binding); err != nil { - return denyRequest(http.StatusBadRequest, metav1.StatusReasonBadRequest, - fmt.Sprintf("Unable to unmarshal the ManagedClusterSetBinding object: %v", err)) - } - - // force the instance name to match the target cluster set name - if binding.Name != binding.Spec.ClusterSet { - return 
denyRequest(http.StatusBadRequest, metav1.StatusReasonBadRequest, - "The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet") - } - - // check if the request user has permission to bind the target cluster set - if admissionSpec.Operation == admissionv1beta1.Create { - return a.allowBindingToClusterSet(binding.Spec.ClusterSet, admissionSpec.UserInfo) - } - - return acceptRequest() -} - -// Initialize is called by generic-admission-server on startup to setup initialization that ManagedClusterSetBinding webhook needs. -func (a *ManagedClusterSetBindingValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { - var err error - a.kubeClient, err = kubernetes.NewForConfig(kubeClientConfig) - if err != nil { - return err - } - - return nil -} - -// allowBindingToClusterSet checks if the user has permission to bind a particular cluster set -func (a *ManagedClusterSetBindingValidatingAdmissionHook) allowBindingToClusterSet(clusterSetName string, userInfo authenticationv1.UserInfo) *admissionv1beta1.AdmissionResponse { - extra := make(map[string]authorizationv1.ExtraValue) - for k, v := range userInfo.Extra { - extra[k] = authorizationv1.ExtraValue(v) - } - - sar := &authorizationv1.SubjectAccessReview{ - Spec: authorizationv1.SubjectAccessReviewSpec{ - User: userInfo.Username, - UID: userInfo.UID, - Groups: userInfo.Groups, - Extra: extra, - ResourceAttributes: &authorizationv1.ResourceAttributes{ - Group: "cluster.open-cluster-management.io", - Resource: "managedclustersets", - Subresource: "bind", - Verb: "create", - Name: clusterSetName, - }, - }, - } - sar, err := a.kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) - if err != nil { - return denyRequest(http.StatusForbidden, metav1.StatusReasonForbidden, err.Error()) - } - if !sar.Status.Allowed { - return denyRequest(http.StatusForbidden, metav1.StatusReasonForbidden, fmt.Sprintf("user %q is not 
allowed to bind cluster set %q", userInfo.Username, clusterSetName)) - } - return acceptRequest() -} - -func acceptRequest() *admissionv1beta1.AdmissionResponse { - return &admissionv1beta1.AdmissionResponse{ - Allowed: true, - } -} - -func denyRequest(code int32, reason metav1.StatusReason, message string) *admissionv1beta1.AdmissionResponse { - return &admissionv1beta1.AdmissionResponse{ - Allowed: false, - Result: &metav1.Status{ - Status: metav1.StatusFailure, - Code: code, - Reason: reason, - Message: message, - }, - } -} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/managedcluster_mutating.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/managedcluster_mutating.go new file mode 100644 index 000000000..96bd4b500 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/managedcluster_mutating.go @@ -0,0 +1,125 @@ +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clusterv1 "open-cluster-management.io/api/cluster/v1" + clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" + ocmfeature "open-cluster-management.io/api/feature" + "open-cluster-management.io/registration/pkg/features" + "open-cluster-management.io/registration/pkg/helpers" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var ( + nowFunc = time.Now + defaultClusterSetName = "default" + _ webhook.CustomDefaulter = &ManagedClusterWebhook{} +) + +func (r *ManagedClusterWebhook) Default(ctx context.Context, obj runtime.Object) error { + req, err := admission.RequestFromContext(ctx) + if err != nil { + return apierrors.NewBadRequest(err.Error()) + } + + var oldManagedCluster *clusterv1.ManagedCluster + if len(req.OldObject.Raw) > 0 { + cluster := 
&clusterv1.ManagedCluster{} + if err := json.Unmarshal(req.OldObject.Raw, cluster); err != nil { + return apierrors.NewBadRequest(err.Error()) + } + oldManagedCluster = cluster + } + + managedCluster, ok := obj.(*clusterv1.ManagedCluster) + if !ok { + return apierrors.NewBadRequest("Request cluster obj format is not right") + } + + //Generate taints + err = r.processTaints(managedCluster, oldManagedCluster) + if err != nil { + return err + } + + //Set default clusterset label + if features.DefaultHubMutableFeatureGate.Enabled(ocmfeature.DefaultClusterSet) { + r.addDefaultClusterSetLabel(managedCluster) + } + + return nil +} + +// processTaints set cluster taints +func (r *ManagedClusterWebhook) processTaints(managedCluster, oldManagedCluster *clusterv1.ManagedCluster) error { + if len(managedCluster.Spec.Taints) == 0 { + return nil + } + now := metav1.NewTime(nowFunc()) + var invalidTaints []string + for index, taint := range managedCluster.Spec.Taints { + originalTaint := helpers.FindTaintByKey(oldManagedCluster, taint.Key) + switch { + case oldManagedCluster == nil: + // handle CREATE operation. + // The request will not be denied if it has taints with timeAdded specified, + // while the specified values will be ignored. + managedCluster.Spec.Taints[index].TimeAdded = now + case originalTaint == nil: + // handle UPDATE operation. + // new taint + // The request will be denied if it has any taint with timeAdded specified. + if !taint.TimeAdded.IsZero() { + invalidTaints = append(invalidTaints, taint.Key) + continue + } + managedCluster.Spec.Taints[index].TimeAdded = now + case originalTaint.Value == taint.Value && originalTaint.Effect == taint.Effect: + // handle UPDATE operation. + // no change + // The request will be denied if it has any taint with different timeAdded specified. + if !originalTaint.TimeAdded.Equal(&taint.TimeAdded) { + invalidTaints = append(invalidTaints, taint.Key) + } + default: + // handle UPDATE operation. 
+ // taint's value/effect has changed + // The request will be denied if it has any taint with timeAdded specified. + if !taint.TimeAdded.IsZero() { + invalidTaints = append(invalidTaints, taint.Key) + continue + } + managedCluster.Spec.Taints[index].TimeAdded = now + } + } + + if len(invalidTaints) == 0 { + return nil + } + return apierrors.NewBadRequest(fmt.Sprintf("It is not allowed to set TimeAdded of Taint %q.", strings.Join(invalidTaints, ","))) +} + +// addDefaultClusterSetLabel add label "cluster.open-cluster-management.io/clusterset:default" for ManagedCluster if the managedCluster has no ManagedClusterSet label +func (a *ManagedClusterWebhook) addDefaultClusterSetLabel(managedCluster *clusterv1.ManagedCluster) { + if len(managedCluster.Labels) == 0 { + managedCluster.Labels = map[string]string{ + clusterv1beta2.ClusterSetLabel: defaultClusterSetName, + } + return + } + + clusterSetName, ok := managedCluster.Labels[clusterv1beta2.ClusterSetLabel] + // Clusterset label do not exist or "", set default clusterset label + if !ok || len(clusterSetName) == 0 { + managedCluster.Labels[clusterv1beta2.ClusterSetLabel] = defaultClusterSetName + } +} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/managedcluster_validating.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/managedcluster_validating.go new file mode 100644 index 000000000..aaaf9cdae --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/managedcluster_validating.go @@ -0,0 +1,233 @@ +package v1 + +import ( + "context" + "fmt" + + operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers" + authenticationv1 "k8s.io/api/authentication/v1" + authorizationv1 "k8s.io/api/authorization/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" + 
+ v1 "open-cluster-management.io/api/cluster/v1" + "open-cluster-management.io/registration/pkg/helpers" + + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var _ webhook.CustomValidator = &ManagedClusterWebhook{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *ManagedClusterWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) error { + managedCluster, ok := obj.(*v1.ManagedCluster) + if !ok { + return apierrors.NewBadRequest("Request cluster obj format is not right") + } + req, err := admission.RequestFromContext(ctx) + if err != nil { + return apierrors.NewBadRequest(err.Error()) + } + //Validate if Spec.ManagedClusterClientConfigs is Valid HTTPS URL + err = r.validateManagedClusterObj(*managedCluster) + if err != nil { + return err + } + + // the HubAcceptsClient field is changed, we need to check the request user whether + // has been allowed to change the HubAcceptsClient field with SubjectAccessReview api + if managedCluster.Spec.HubAcceptsClient { + err := r.allowUpdateAcceptField(managedCluster.Name, req.UserInfo) + if err != nil { + return err + } + } + + // check whether the request user has been allowed to set clusterset label + var clusterSetName string + if len(managedCluster.Labels) > 0 { + clusterSetName = managedCluster.Labels[clusterv1beta2.ClusterSetLabel] + } + + return r.allowSetClusterSetLabel(req.UserInfo, "", clusterSetName) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *ManagedClusterWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error { + managedCluster, ok := newObj.(*v1.ManagedCluster) + if !ok { + return apierrors.NewBadRequest("Request new cluster obj format is not right") + } + oldManagedCluster, ok := oldObj.(*v1.ManagedCluster) + if !ok { + return apierrors.NewBadRequest("Request old cluster obj format is not right") + 
} + req, err := admission.RequestFromContext(ctx) + if err != nil { + return apierrors.NewBadRequest(err.Error()) + } + + //Validate if Spec.ManagedClusterClientConfigs is Valid HTTPS URL + err = r.validateManagedClusterObj(*managedCluster) + if err != nil { + return err + } + + // the HubAcceptsClient field is changed, we need to check the request user whether + // has been allowed to change the HubAcceptsClient field with SubjectAccessReview api + if managedCluster.Spec.HubAcceptsClient { + err := r.allowUpdateAcceptField(managedCluster.Name, req.UserInfo) + if err != nil { + return err + } + } + + // check whether the request user has been allowed to set clusterset label + var originalClusterSetName, currentClusterSetName string + if len(oldManagedCluster.Labels) > 0 { + originalClusterSetName = oldManagedCluster.Labels[clusterv1beta2.ClusterSetLabel] + } + if len(managedCluster.Labels) > 0 { + currentClusterSetName = managedCluster.Labels[clusterv1beta2.ClusterSetLabel] + } + + return r.allowSetClusterSetLabel(req.UserInfo, originalClusterSetName, currentClusterSetName) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *ManagedClusterWebhook) ValidateDelete(_ context.Context, obj runtime.Object) error { + return nil +} + +// validateManagedClusterObj validates the fileds of ManagedCluster object +func (r *ManagedClusterWebhook) validateManagedClusterObj(cluster v1.ManagedCluster) error { + errs := []error{} + + // there are no spoke client configs, finish the validation process + if len(cluster.Spec.ManagedClusterClientConfigs) == 0 { + return nil + } + + // validate the url in spoke client configs + for _, clientConfig := range cluster.Spec.ManagedClusterClientConfigs { + if !helpers.IsValidHTTPSURL(clientConfig.URL) { + errs = append(errs, fmt.Errorf("url %q is invalid in client configs", clientConfig.URL)) + } + } + if len(errs) != 0 { + return 
apierrors.NewBadRequest(operatorhelpers.NewMultiLineAggregate(errs).Error()) + } + return nil +} + +// allowUpdateHubAcceptsClientField using SubjectAccessReview API to check whether a request user has been authorized to update +// HubAcceptsClient field +func (r *ManagedClusterWebhook) allowUpdateAcceptField(clusterName string, userInfo authenticationv1.UserInfo) error { + extra := make(map[string]authorizationv1.ExtraValue) + for k, v := range userInfo.Extra { + extra[k] = authorizationv1.ExtraValue(v) + } + + sar := &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + User: userInfo.Username, + UID: userInfo.UID, + Groups: userInfo.Groups, + Extra: extra, + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Group: "register.open-cluster-management.io", + Resource: "managedclusters", + Verb: "update", + Subresource: "accept", + Name: clusterName, + }, + }, + } + sar, err := r.kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) + if err != nil { + return apierrors.NewForbidden( + v1.Resource("managedclusters/accept"), + clusterName, + err, + ) + } + + if !sar.Status.Allowed { + return apierrors.NewForbidden( + v1.Resource("managedclusters/accept"), + clusterName, + fmt.Errorf("user %q cannot update the HubAcceptsClient field", userInfo.Username), + ) + } + + return nil +} + +// allowSetClusterSetLabel checks whether a request user has been authorized to set clusterset label +func (r *ManagedClusterWebhook) allowSetClusterSetLabel(userInfo authenticationv1.UserInfo, originalClusterSet, newClusterSet string) error { + if originalClusterSet == newClusterSet { + return nil + } + + if len(originalClusterSet) > 0 { + err := r.allowUpdateClusterSet(userInfo, originalClusterSet) + if err != nil { + return err + } + } + + if len(newClusterSet) > 0 { + err := r.allowUpdateClusterSet(userInfo, newClusterSet) + if err != nil { + return err + } + } + + return nil +} + +// 
allowUpdateClusterSet checks whether a request user has been authorized to add/remove a ManagedCluster +// to/from the ManagedClusterSet +func (r *ManagedClusterWebhook) allowUpdateClusterSet(userInfo authenticationv1.UserInfo, clusterSetName string) error { + extra := make(map[string]authorizationv1.ExtraValue) + for k, v := range userInfo.Extra { + extra[k] = authorizationv1.ExtraValue(v) + } + + sar := &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + User: userInfo.Username, + UID: userInfo.UID, + Groups: userInfo.Groups, + Extra: extra, + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Group: "cluster.open-cluster-management.io", + Resource: "managedclustersets", + Subresource: "join", + Name: clusterSetName, + Verb: "create", + }, + }, + } + sar, err := r.kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) + if err != nil { + return apierrors.NewForbidden( + v1.Resource("managedclustersets/join"), + clusterSetName, + err, + ) + } + + if !sar.Status.Allowed { + return apierrors.NewForbidden( + v1.Resource("managedclustersets/join"), + clusterSetName, + fmt.Errorf("user %q cannot add/remove a ManagedCluster to/from ManagedClusterSet %q", userInfo.Username, clusterSetName), + ) + } + + return nil +} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/webhook.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/webhook.go new file mode 100644 index 000000000..ae579bc4c --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1/webhook.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "k8s.io/client-go/kubernetes" + v1 "open-cluster-management.io/api/cluster/v1" + ctrl "sigs.k8s.io/controller-runtime" +) + +type ManagedClusterWebhook struct { + kubeClient kubernetes.Interface +} + +func (r *ManagedClusterWebhook) Init(mgr ctrl.Manager) error { + err := 
r.SetupWebhookWithManager(mgr) + if err != nil { + return err + } + r.kubeClient, err = kubernetes.NewForConfig(mgr.GetConfig()) + return err +} + +// SetExternalKubeClientSet is function to enable the webhook injecting to kube admssion +func (r *ManagedClusterWebhook) SetExternalKubeClientSet(client kubernetes.Interface) { + r.kubeClient = client +} + +func (r *ManagedClusterWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + WithValidator(r). + WithDefaulter(r). + For(&v1.ManagedCluster{}). + Complete() +} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/managedclusterset_conversion.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/managedclusterset_conversion.go new file mode 100644 index 000000000..bdd88b8d9 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/managedclusterset_conversion.go @@ -0,0 +1,4 @@ +package v1beta1 + +// Hub marks this type as a conversion hub. 
+func (r *ManagedClusterSet) Hub() {} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/managedclustersetbinding_validating.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/managedclustersetbinding_validating.go new file mode 100644 index 000000000..10ff7946b --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/managedclustersetbinding_validating.go @@ -0,0 +1,99 @@ +package v1beta1 + +import ( + "context" + "fmt" + + authenticationv1 "k8s.io/api/authentication/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/kubernetes" + + authorizationv1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/runtime" + "open-cluster-management.io/api/cluster/v1beta1" + + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var _ webhook.CustomValidator = &ManagedClusterSetBindingWebhook{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (b *ManagedClusterSetBindingWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) error { + binding, ok := obj.(*v1beta1.ManagedClusterSetBinding) + if !ok { + return apierrors.NewBadRequest("Request clustersetbinding obj format is not right") + } + + // force the instance name to match the target cluster set name + if binding.Name != binding.Spec.ClusterSet { + return apierrors.NewBadRequest("The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet") + } + + req, err := admission.RequestFromContext(ctx) + if err != nil { + return apierrors.NewBadRequest(err.Error()) + } + return AllowBindingToClusterSet(b.kubeClient, binding.Spec.ClusterSet, req.UserInfo) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (b *ManagedClusterSetBindingWebhook) 
ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error { + binding, ok := newObj.(*v1beta1.ManagedClusterSetBinding) + if !ok { + return apierrors.NewBadRequest("Request clustersetbinding obj format is not right") + } + + // force the instance name to match the target cluster set name + if binding.Name != binding.Spec.ClusterSet { + return apierrors.NewBadRequest("The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet") + } + return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (b *ManagedClusterSetBindingWebhook) ValidateDelete(_ context.Context, obj runtime.Object) error { + return nil +} + +// allowBindingToClusterSet checks if the user has permission to bind a particular cluster set +func AllowBindingToClusterSet(kubeClient kubernetes.Interface, clusterSetName string, userInfo authenticationv1.UserInfo) error { + extra := make(map[string]authorizationv1.ExtraValue) + for k, v := range userInfo.Extra { + extra[k] = authorizationv1.ExtraValue(v) + } + + sar := &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + User: userInfo.Username, + UID: userInfo.UID, + Groups: userInfo.Groups, + Extra: extra, + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Group: "cluster.open-cluster-management.io", + Resource: "managedclustersets", + Subresource: "bind", + Verb: "create", + Name: clusterSetName, + }, + }, + } + sar, err := kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) + if err != nil { + return apierrors.NewForbidden( + v1beta1.Resource("managedclustersets/bind"), + clusterSetName, + err, + ) + } + if !sar.Status.Allowed { + return apierrors.NewForbidden( + v1beta1.Resource("managedclustersets/bind"), + clusterSetName, + fmt.Errorf("user %q is not allowed to bind cluster set %q", userInfo.Username, clusterSetName), + ) + } + return nil +} diff --git 
a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/webhook.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/webhook.go new file mode 100644 index 000000000..763cb676c --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta1/webhook.go @@ -0,0 +1,68 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + "open-cluster-management.io/api/cluster/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(v1beta1.GroupVersion, + &ManagedClusterSet{}, + &v1beta1.ManagedClusterSetBinding{}, + ) + metav1.AddToGroupVersion(scheme, v1beta1.GroupVersion) + return nil +} + +type ManagedClusterSet struct { + v1beta1.ManagedClusterSet +} + +type ManagedClusterSetBindingWebhook struct { + kubeClient kubernetes.Interface +} + +func (r *ManagedClusterSet) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +func (b *ManagedClusterSetBindingWebhook) Init(mgr ctrl.Manager) error { + err := b.SetupWebhookWithManager(mgr) + if err != nil { + return err + } + b.kubeClient, err = kubernetes.NewForConfig(mgr.GetConfig()) + return err +} + +// SetExternalKubeClientSet is function to enable the webhook injecting to kube admssion +func (b *ManagedClusterSetBindingWebhook) SetExternalKubeClientSet(client kubernetes.Interface) { + b.kubeClient = client +} + +func (b *ManagedClusterSetBindingWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). 
+ WithValidator(b). + For(&v1beta1.ManagedClusterSetBinding{}). + Complete() +} + +func ClustersetbindingGroupKind() schema.GroupKind { + return schema.GroupKind{ + Group: v1beta1.GroupName, + Kind: "ManagedClusterSetBinding", + } +} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/managedclusterset_conversion.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/managedclusterset_conversion.go new file mode 100644 index 000000000..721926b73 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/managedclusterset_conversion.go @@ -0,0 +1,50 @@ +package v1beta2 + +import ( + "k8s.io/klog/v2" + "open-cluster-management.io/api/cluster/v1beta1" + "open-cluster-management.io/api/cluster/v1beta2" + internalv1beta1 "open-cluster-management.io/registration/pkg/webhook/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +/* +ConvertTo is expected to modify its argument to contain the converted object. +Most of the conversion is straightforward copying, except for converting our changed field. +*/ +// ConvertTo converts this ManagedClusterSet to the Hub(v1beta1) version. 
+func (src *ManagedClusterSet) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*internalv1beta1.ManagedClusterSet) + klog.V(4).Infof("Converting ManagedClusterset %v from v1beta2 to v1beta1", src.Name) + + dst.ObjectMeta = src.ObjectMeta + if len(src.Spec.ClusterSelector.SelectorType) == 0 || src.Spec.ClusterSelector.SelectorType == v1beta2.ExclusiveClusterSetLabel { + dst.Spec.ClusterSelector.SelectorType = v1beta1.SelectorType(v1beta1.LegacyClusterSetLabel) + } else { + dst.Spec.ClusterSelector.SelectorType = v1beta1.SelectorType(src.Spec.ClusterSelector.SelectorType) + dst.Spec.ClusterSelector.LabelSelector = src.Spec.ClusterSelector.LabelSelector + } + dst.Status = v1beta1.ManagedClusterSetStatus(src.Status) + return nil +} + +/* +ConvertFrom is expected to modify its receiver to contain the converted object. +Most of the conversion is straightforward copying, except for converting our changed field. +*/ + +// ConvertFrom converts from the Hub version (v1beta1) to this version. 
+func (dst *ManagedClusterSet) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*internalv1beta1.ManagedClusterSet) + klog.V(4).Infof("Converting ManagedClusterset %v from v1beta1 to v1beta2", src.Name) + + dst.ObjectMeta = src.ObjectMeta + if len(src.Spec.ClusterSelector.SelectorType) == 0 || src.Spec.ClusterSelector.SelectorType == v1beta1.LegacyClusterSetLabel { + dst.Spec.ClusterSelector.SelectorType = v1beta2.ExclusiveClusterSetLabel + } else { + dst.Spec.ClusterSelector.SelectorType = v1beta2.SelectorType(src.Spec.ClusterSelector.SelectorType) + dst.Spec.ClusterSelector.LabelSelector = src.Spec.ClusterSelector.LabelSelector + } + dst.Status = v1beta2.ManagedClusterSetStatus(src.Status) + return nil +} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/managedclustersetbinding_validating.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/managedclustersetbinding_validating.go new file mode 100644 index 000000000..7cb865854 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/managedclustersetbinding_validating.go @@ -0,0 +1,54 @@ +package v1beta2 + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/runtime" + "open-cluster-management.io/api/cluster/v1beta2" + internalv1beta1 "open-cluster-management.io/registration/pkg/webhook/v1beta1" + + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var _ webhook.CustomValidator = &ManagedClusterSetBindingWebhook{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (b *ManagedClusterSetBindingWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) error { + binding, ok := obj.(*v1beta2.ManagedClusterSetBinding) + if !ok { + return apierrors.NewBadRequest("Request clustersetbinding obj format is not right") + } + + // force 
the instance name to match the target cluster set name + if binding.Name != binding.Spec.ClusterSet { + return apierrors.NewBadRequest("The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet") + } + + req, err := admission.RequestFromContext(ctx) + if err != nil { + return apierrors.NewBadRequest(err.Error()) + } + return internalv1beta1.AllowBindingToClusterSet(b.kubeClient, binding.Spec.ClusterSet, req.UserInfo) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (b *ManagedClusterSetBindingWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error { + binding, ok := newObj.(*v1beta2.ManagedClusterSetBinding) + if !ok { + return apierrors.NewBadRequest("Request clustersetbinding obj format is not right") + } + + // force the instance name to match the target cluster set name + if binding.Name != binding.Spec.ClusterSet { + return apierrors.NewBadRequest("The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet") + } + return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (b *ManagedClusterSetBindingWebhook) ValidateDelete(_ context.Context, obj runtime.Object) error { + return nil +} diff --git a/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/webhook.go b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/webhook.go new file mode 100644 index 000000000..be3ea76a8 --- /dev/null +++ b/controlplane/vendor/open-cluster-management.io/registration/pkg/webhook/v1beta2/webhook.go @@ -0,0 +1,60 @@ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "open-cluster-management.io/api/cluster/v1beta2" + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a 
function which adds this version to a scheme + Install = schemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(v1beta2.GroupVersion, + &ManagedClusterSet{}, + &v1beta2.ManagedClusterSetBinding{}, + ) + metav1.AddToGroupVersion(scheme, v1beta2.GroupVersion) + return nil +} + +type ManagedClusterSet struct { + v1beta2.ManagedClusterSet +} + +type ManagedClusterSetBindingWebhook struct { + kubeClient kubernetes.Interface +} + +func (src *ManagedClusterSet) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(src). + Complete() +} + +func (b *ManagedClusterSetBindingWebhook) Init(mgr ctrl.Manager) error { + err := b.SetupWebhookWithManager(mgr) + if err != nil { + return err + } + b.kubeClient, err = kubernetes.NewForConfig(mgr.GetConfig()) + return err +} + +// SetExternalKubeClientSet is a function to enable injecting the webhook into kube admission +func (b *ManagedClusterSetBindingWebhook) SetExternalKubeClientSet(client kubernetes.Interface) { + b.kubeClient = client +} + +func (b *ManagedClusterSetBindingWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + WithValidator(b). + For(&v1beta2.ManagedClusterSetBinding{}). + Complete() +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/.gitignore b/controlplane/vendor/sigs.k8s.io/controller-runtime/.gitignore new file mode 100644 index 000000000..c2c72faf3 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/.gitignore @@ -0,0 +1,24 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ + +# Vscode files +.vscode + +# Tools binaries. 
+hack/tools/bin diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/controlplane/vendor/sigs.k8s.io/controller-runtime/.golangci.yml new file mode 100644 index 000000000..77f528ff0 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/.golangci.yml @@ -0,0 +1,141 @@ +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - deadcode + - depguard + - dogsled + - errcheck + - errorlint + - exportloopref + - goconst + - gocritic + - gocyclo + - godot + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ifshort + - importas + - ineffassign + - misspell + - nakedret + - nilerr + - nolintlint + - prealloc + - revive + - rowserrcheck + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + +linters-settings: + ifshort: + # Maximum length of variable declaration measured in number of characters, after which linter won't suggest using short syntax. + max-decl-chars: 50 + importas: + no-unaliased: true + alias: + # Kubernetes + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 + alias: apiextensionsv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors + - pkg: k8s.io/apimachinery/pkg/util/errors + alias: kerrors + # Controller Runtime + - pkg: sigs.k8s.io/controller-runtime + alias: ctrl + staticcheck: + go: "1.18" + stylecheck: + go: "1.18" + depguard: + include-go-root: true + packages: + - io/ioutil # https://go.dev/doc/go1.16#ioutil + +issues: + max-same-issues: 0 + max-issues-per-linter: 0 + # We are disabling default golangci exclusions because we want to help reviewers to focus on reviewing the most relevant + # changes in PRs and avoid nitpicking. + exclude-use-default: false + # List of regexps of issue texts to exclude, empty list by default. 
+ exclude: + # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. + # If it is decided they will not be addressed they should be moved above this comment. + - Subprocess launch(ed with variable|ing should be audited) + - (G204|G104|G307) + - "ST1000: at least one file in a package should have a package comment" + exclude-rules: + - linters: + - gosec + text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" + - linters: + - revive + text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # With Go 1.16, the new embed directive can be used with an un-named import, + # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. + # This directive allows the embed package to be imported with an underscore everywhere. + - linters: + - revive + source: _ "embed" + # Exclude some packages or code to require comments, for example test code, or fake clients. + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + source: (func|type).*Fake.* + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + path: fake_\.go + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam + text: always receives + # Dot imports for gomega or ginkgo are allowed + # within test files. + - path: _test\.go + text: should not use dot imports + - path: _test\.go + text: cyclomatic complexity + - path: _test\.go + text: "G107: Potential HTTP request made with variable url" + # Append should be able to assign to a different var/slice. 
+ - linters: + - gocritic + text: "appendAssign: append result not assigned to the same slice" + - linters: + - gocritic + text: "singleCaseSwitch: should rewrite switch statement to if statement" + # It considers all file access to a filename that comes from a variable problematic, + # which is naive at best. + - linters: + - gosec + text: "G304: Potential file inclusion via variable" + +run: + timeout: 10m + skip-files: + - "zz_generated.*\\.go$" + - ".*conversion.*\\.go$" + allow-parallel-runners: true diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md b/controlplane/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md new file mode 100644 index 000000000..2c0ea1f66 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md @@ -0,0 +1,19 @@ +# Contributing guidelines + +## Sign the CLA + +Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests. + +Please see https://git.k8s.io/community/CLA.md for more info + +## Contributing steps + +1. Submit an issue describing your proposed change to the repo in question. +1. The [repo owners](OWNERS) will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Submit a pull request. + +## Test locally + +Run the command `make test` to test the changes locally. diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/FAQ.md b/controlplane/vendor/sigs.k8s.io/controller-runtime/FAQ.md new file mode 100644 index 000000000..c21b29e28 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/FAQ.md @@ -0,0 +1,81 @@ +# FAQ + +### Q: How do I know which type of object a controller references? + +**A**: Each controller should only reconcile one object type. 
Other +affected objects should be mapped to a single type of root object, using +the `EnqueueRequestForOwner` or `EnqueueRequestsFromMapFunc` event +handlers, and potentially indices. Then, your Reconcile method should +attempt to reconcile *all* state for that given root objects. + +### Q: How do I have different logic in my reconciler for different types of events (e.g. create, update, delete)? + +**A**: You should not. Reconcile functions should be idempotent, and +should always reconcile state by reading all the state it needs, then +writing updates. This allows your reconciler to correctly respond to +generic events, adjust to skipped or coalesced events, and easily deal +with application startup. The controller will enqueue reconcile requests +for both old and new objects if a mapping changes, but it's your +responsibility to make sure you have enough information to be able clean +up state that's no longer referenced. + +### Q: My cache might be stale if I read from a cache! How should I deal with that? + +**A**: There are several different approaches that can be taken, depending +on your situation. + +- When you can, take advantage of optimistic locking: use deterministic + names for objects you create, so that the Kubernetes API server will + warn you if the object already exists. Many controllers in Kubernetes + take this approach: the StatefulSet controller appends a specific number + to each pod that it creates, while the Deployment controller hashes the + pod template spec and appends that. + +- In the few cases when you cannot take advantage of deterministic names + (e.g. when using generateName), it may be useful in to track which + actions you took, and assume that they need to be repeated if they don't + occur after a given time (e.g. using a requeue result). This is what + the ReplicaSet controller does. + +In general, write your controller with the assumption that information +will eventually be correct, but may be slightly out of date. 
Make sure +that your reconcile function enforces the entire state of the world each +time it runs. If none of this works for you, you can always construct +a client that reads directly from the API server, but this is generally +considered to be a last resort, and the two approaches above should +generally cover most circumstances. + +### Q: Where's the fake client? How do I use it? + +**A**: The fake client +[exists](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake), +but we generally recommend using +[envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) +to test against a real API server. In our experience, tests using fake +clients gradually re-implement poorly-written impressions of a real API +server, which leads to hard-to-maintain, complex test code. + +### Q: How should I write tests? Any suggestions for getting started? + +- Use the aforementioned + [envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) + to spin up a real API server instead of trying to mock one out. + +- Structure your tests to check that the state of the world is as you + expect it, *not* that a particular set of API calls were made, when + working with Kubernetes APIs. This will allow you to more easily + refactor and improve the internals of your controllers without changing + your tests. + +- Remember that any time you're interacting with the API server, changes + may have some delay between write time and reconcile time. + +### Q: What are these errors about no Kind being registered for a type? + +**A**: You're probably missing a fully-set-up Scheme. Schemes record the +mapping between Go types and group-version-kinds in Kubernetes. In +general, your application should have its own Scheme containing the types +from the API groups that it needs (be they Kubernetes types or your own). 
+See the [scheme builder +docs](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/scheme) for +more information. diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/LICENSE b/controlplane/vendor/sigs.k8s.io/controller-runtime/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/Makefile b/controlplane/vendor/sigs.k8s.io/controller-runtime/Makefile new file mode 100644 index 000000000..36647c697 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/Makefile @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +# Copyright 2020 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# If you update this file, please follow +# https://suva.sh/posts/well-documented-makefiles + +## -------------------------------------- +## General +## -------------------------------------- + +SHELL:=/usr/bin/env bash +.DEFAULT_GOAL:=help + +# Use GOPROXY environment variable if set +GOPROXY := $(shell go env GOPROXY) +ifeq ($(GOPROXY),) +GOPROXY := https://proxy.golang.org +endif +export GOPROXY + +# Active module mode, as we use go modules to manage dependencies +export GO111MODULE=on + +# Tools. 
+TOOLS_DIR := hack/tools +TOOLS_BIN_DIR := $(TOOLS_DIR)/bin +GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/golangci-lint) +GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff +CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen +ENVTEST_DIR := $(abspath tools/setup-envtest) + +# The help will print out all targets with their descriptions organized below their categories. The categories are represented by `##@` and the target descriptions by `##`. +# The awk command is responsible to read the entire set of makefiles included in this invocation, looking for lines of the file as xyz: ## something, and then pretty-format the target and help. Then, if there's a line with ##@ something, that gets pretty-printed as a category. +# More info over the usage of ANSI control characters for terminal formatting: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info over awk command: http://linuxcommand.org/lc3_adv_awk.php +.PHONY: help
help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +## -------------------------------------- +## Testing +## -------------------------------------- + +.PHONY: test +test: test-tools ## Run the script check-everything.sh which will check all. + TRACE=1 ./hack/check-everything.sh + +.PHONY: test-tools +test-tools: ## tests the tools codebase (setup-envtest) + cd tools/setup-envtest && go test ./... + +## -------------------------------------- +## Binaries +## -------------------------------------- + +$(GO_APIDIFF): $(TOOLS_DIR)/go.mod # Build go-apidiff from tools folder. + cd $(TOOLS_DIR) && go build -tags=tools -o bin/go-apidiff github.com/joelanford/go-apidiff + +$(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod # Build controller-gen from tools folder. 
+ cd $(TOOLS_DIR) && go build -tags=tools -o bin/controller-gen sigs.k8s.io/controller-tools/cmd/controller-gen + +$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golangci-lint using hack script into tools folder. + hack/ensure-golangci-lint.sh \ + -b $(TOOLS_BIN_DIR) \ + $(shell cat .github/workflows/golangci-lint.yml | grep version | sed 's/.*version: //') + +## -------------------------------------- +## Linting +## -------------------------------------- + +.PHONY: lint +lint: $(GOLANGCI_LINT) ## Lint codebase + $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) + cd tools/setup-envtest; $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) + +.PHONY: lint-fix +lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter. + GOLANGCI_LINT_EXTRA_ARGS=--fix $(MAKE) lint + +## -------------------------------------- +## Generate +## -------------------------------------- + +.PHONY: modules +modules: ## Runs go mod to ensure modules are up to date. + go mod tidy + cd $(TOOLS_DIR); go mod tidy + cd $(ENVTEST_DIR); go mod tidy + +.PHONY: generate +generate: $(CONTROLLER_GEN) ## Runs controller-gen for internal types for config file + $(CONTROLLER_GEN) object paths="./pkg/config/v1alpha1/...;./examples/configfile/custom/v1alpha1/..." + +## -------------------------------------- +## Cleanup / Verification +## -------------------------------------- + +.PHONY: clean +clean: ## Cleanup. + $(MAKE) clean-bin + +.PHONY: clean-bin +clean-bin: ## Remove all generated binaries. 
+ rm -rf hack/tools/bin + +.PHONY: verify-modules +verify-modules: modules + @if !(git diff --quiet HEAD -- go.sum go.mod); then \ + echo "go module files are out of date, please run 'make modules'"; exit 1; \ + fi diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/OWNERS b/controlplane/vendor/sigs.k8s.io/controller-runtime/OWNERS new file mode 100644 index 000000000..4b1fa044b --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +approvers: + - controller-runtime-admins + - controller-runtime-maintainers + - controller-runtime-approvers +reviewers: + - controller-runtime-admins + - controller-runtime-reviewers + - controller-runtime-approvers diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES b/controlplane/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES new file mode 100644 index 000000000..82f7a2bef --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES @@ -0,0 +1,41 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +aliases: + # active folks who can be contacted to perform admin-related + # tasks on the repo, or otherwise approve any PRS. 
+ controller-runtime-admins: + - droot + - mengqiy + - pwittrock + + # non-admin folks who have write-access and can approve any PRs in the repo + controller-runtime-maintainers: + - vincepri + - joelanford + + # non-admin folks who can approve any PRs in the repo + controller-runtime-approvers: + - gerred + - shawn-hurley + - alvaroaleman + + # folks who can review and LGTM any PRs in the repo (doesn't + # include approvers & admins -- those count too via the OWNERS + # file) + controller-runtime-reviewers: + - alenkacz + - vincepri + - alexeldeib + - varshaprasad96 + - fillzpp + + # folks to can approve things in the directly-ported + # testing_frameworks portions of the codebase + testing-integration-approvers: + - apelisse + - hoegaarden + + # folks who may have context on ancient history, + # but are no longer directly involved + controller-runtime-emeritus-maintainers: + - directxman12 diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/README.md b/controlplane/vendor/sigs.k8s.io/controller-runtime/README.md new file mode 100644 index 000000000..cd358b94f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/README.md @@ -0,0 +1,66 @@ +[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/controller-runtime)](https://goreportcard.com/report/sigs.k8s.io/controller-runtime) +[![godoc](https://pkg.go.dev/badge/sigs.k8s.io/controller-runtime)](https://pkg.go.dev/sigs.k8s.io/controller-runtime) + +# Kubernetes controller-runtime Project + +The Kubernetes controller-runtime Project is a set of go libraries for building +Controllers. It is leveraged by [Kubebuilder](https://book.kubebuilder.io/) and +[Operator SDK](https://github.com/operator-framework/operator-sdk). Both are +a great place to start for new projects. See +[Kubebuilder's Quick Start](https://book.kubebuilder.io/quick-start.html) to +see how it can be used. 
+ +Documentation: + +- [Package overview](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg) +- [Basic controller using builder](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder) +- [Creating a manager](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#example-New) +- [Creating a controller](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#example-New) +- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/master/examples) +- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/master/designs) + +# Versioning, Maintenance, and Compatibility + +The full documentation can be found at [VERSIONING.md](VERSIONING.md), but TL;DR: + +Users: + +- We follow [Semantic Versioning (semver)](https://semver.org) +- Use releases with your dependency management to ensure that you get compatible code +- The master branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) + +Contributors: + +- All code PR must be labeled with :bug: (patch fixes), :sparkles: (backwards-compatible features), or :warning: (breaking changes) +- Breaking changes will find their way into the next major release, other changes will go into an semi-immediate patch or minor release +- For a quick PR template suggesting the right information, use one of these PR templates: + * [Breaking Changes/Features](/.github/PULL_REQUEST_TEMPLATE/breaking_change.md) + * [Backwards-Compatible Features](/.github/PULL_REQUEST_TEMPLATE/compat_feature.md) + * [Bug fixes](/.github/PULL_REQUEST_TEMPLATE/bug_fix.md) + * [Documentation Changes](/.github/PULL_REQUEST_TEMPLATE/docs.md) + * [Test/Build/Other Changes](/.github/PULL_REQUEST_TEMPLATE/other.md) + +## FAQ + +See [FAQ.md](FAQ.md) + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). 
+ +controller-runtime is a subproject of the [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) project +in sig apimachinery. + +You can reach the maintainers of this project at: + +- Slack channel: [#kubebuilder](http://slack.k8s.io/#kubebuilder) +- Google Group: [kubebuilder@googlegroups.com](https://groups.google.com/forum/#!forum/kubebuilder) + +## Contributing +Contributions are greatly appreciated. The maintainers actively manage the issues list, and try to highlight issues suitable for newcomers. +The project follows the typical GitHub pull request model. See [CONTRIBUTING.md](CONTRIBUTING.md) for more details. +Before starting any work, please either comment on an existing issue, or file a new one. + +## Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS b/controlplane/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS new file mode 100644 index 000000000..32e6a3b90 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +pwittrock +droot diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md b/controlplane/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md new file mode 100644 index 000000000..b3cfc6651 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md @@ -0,0 +1,169 @@ +Logging Guidelines +================== + +controller-runtime uses a kind of logging called *structured logging*. If +you've used a library like Zap or logrus before, you'll be familiar with +the concepts we use. If you've only used a logging library like the "log" +package (in the Go standard library) or "glog" (in Kubernetes), you'll +need to adjust how you think about logging a bit. + +### Getting Started With Structured Logging + +With structured logging, we associate a *constant* log message with some +variable key-value pairs. For instance, suppose we wanted to log that we +were starting reconciliation on a pod. In the Go standard library logger, +we might write: + +```go +log.Printf("starting reconciliation for pod %s/%s", podNamespace, podName) +``` + +In controller-runtime, we'd instead write: + +```go +logger.Info("starting reconciliation", "pod", req.NamespacedName) +``` + +or even write + +```go +func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { + logger := logger.WithValues("pod", req.NamespacedName) + // do some stuff + logger.Info("starting reconciliation") +} +``` + +Notice how we've broken out the information that we want to convey into +a constant message (`"starting reconciliation"`) and some key-value pairs +that convey variable information (`"pod", req.NamespacedName`). We've +thereby added "structure" to our logs, which makes them easier to save +and search later, as well as correlate with metrics and events. 
+ +All of controller-runtime's logging is done via +[logr](https://github.com/go-logr/logr), a generic interface for +structured logging. You can use whichever logging library you want to +implement the actual mechanics of the logging. controller-runtime +provides some helpers to make it easy to use +[Zap](https://go.uber.org/zap) as the implementation. + +You can configure the logging implementation using +`"sigs.k8s.io/controller-runtime/pkg/log".SetLogger`. That +package also contains the convenience functions for setting up Zap. + +You can get a handle to the "root" logger using +`"sigs.k8s.io/controller-runtime/pkg/log".Log`, and can then call +`WithName` to create individual named loggers. You can call `WithName` +repeatedly to chain names together: + +```go +logger := log.Log.WithName("controller").WithName("replicaset") +// in reconcile... +logger = logger.WithValues("replicaset", req.NamespacedName) +// later on in reconcile... +logger.Info("doing things with pods", "pod", newPod) +``` + +As seen above, you can also call `WithValues` to create a new sub-logger +that always attaches some key-value pairs to a logger. + +Finally, you can use `V(1)` to mark a particular log line as "debug" logs: + +```go +logger.V(1).Info("this is particularly verbose!", "state of the world", +allKubernetesObjectsEverywhere) +``` + +While it's possible to use higher log levels, it's recommended that you +stick with `V(1)` or `V(0)` (which is equivalent to not specifying `V`), +and then filter later based on key-value pairs or messages; different +numbers tend to lose meaning easily over time, and you'll be left +wondering why particular log lines are at `V(5)` instead of `V(7)`. + +## Logging errors + +Errors should *always* be logged with `log.Error`, which allows logr +implementations to provide special handling of errors (for instance, +providing stack traces in debug mode). + +It's acceptable to call `log.Error` with a nil error object. 
This +conveys that an error occurred in some capacity, but that no actual +`error` object was involved. + +Errors returned by the `Reconcile` implementation of the `Reconciler` interface are commonly logged as a `Reconciler error`. +It's a developer choice to create an additional error log in the `Reconcile` implementation so a more specific file name and line for the error are returned. + +## Logging messages + +- Don't put variable content in your messages -- use key-value pairs for + that. Never use `fmt.Sprintf` in your message. + +- Try to match the terminology in your messages with your key-value pairs + -- for instance, if you have a key-value pairs `api version`, use the + term `APIVersion` instead of `GroupVersion` in your message. + +## Logging Kubernetes Objects + +Kubernetes objects should be logged directly, like `log.Info("this is +a Kubernetes object", "pod", somePod)`. controller-runtime provides +a special encoder for Zap that will transform Kubernetes objects into +`name, namespace, apiVersion, kind` objects, when available and not in +development mode. Other logr implementations should implement similar +logic. + +## Logging Structured Values (Key-Value pairs) + +- Use lower-case, space separated keys. For example `object` for objects, + `api version` for `APIVersion` + +- Be consistent across your application, and with controller-runtime when + possible. + +- Try to be brief but descriptive. + +- Match terminology in keys with terminology in the message. + +- Be careful logging non-Kubernetes objects verbatim if they're very + large. + +### Groups, Versions, and Kinds + +- Kinds should not be logged alone (they're meaningless alone). Use + a `GroupKind` object to log them instead, or a `GroupVersionKind` when + version is relevant. + +- If you need to log an API version string, use `api version` as the key + (formatted as with a `GroupVersion`, or as received directly from API + discovery). 
+ +### Objects and Types + +- If code works with a generic Kubernetes `runtime.Object`, use the + `object` key. For specific objects, prefer the resource name as the key + (e.g. `pod` for `v1.Pod` objects). + +- For non-Kubernetes objects, the `object` key may also be used, if you + accept a generic interface. + +- When logging a raw type, log it using the `type` key, with a value of + `fmt.Sprintf("%T", typ)` + +- If there's specific context around a type, the key may be more specific, + but should end with `type` -- for instance, `OwnerType` should be logged + as `owner` in the context of `log.Error(err, "Could not get ObjectKinds + for OwnerType", `owner type`, fmt.Sprintf("%T"))`. When possible, favor + communicating kind instead. + +### Multiple things + +- When logging multiple things, simply pluralize the key. + +### controller-runtime Specifics + +- Reconcile requests should be logged as `request`, although normal code + should favor logging the key. + +- Reconcile keys should be logged as with the same key as if you were + logging the object directly (e.g. `log.Info("reconciling pod", "pod", + req.NamespacedName)`). This ends up having a similar effect to logging + the object directly. diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md b/controlplane/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md new file mode 100644 index 000000000..18779000e --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md @@ -0,0 +1,30 @@ +# Versioning and Branching in controller-runtime + +We follow the [common KubeBuilder versioning guidelines][guidelines], and +use the corresponding tooling. + +For the purposes of the aforementioned guidelines, controller-runtime +counts as a "library project", but otherwise follows the guidelines +exactly. 
+ +[guidelines]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md + +## Compatibility and Release Support + +For release branches, we generally tend to support backporting one (1) +major release (`release-{X-1}` or `release-0.{Y-1}`), but may go back +further if the need arises and is very pressing (e.g. security updates). + +### Dependency Support + +Note the [guidelines on dependency versions][dep-versions]. Particularly: + +- We **DO** guarantee Kubernetes REST API compatibility -- if a given + version of controller-runtime stops working with what should be + a supported version of Kubernetes, this is almost certainly a bug. + +- We **DO NOT** guarantee any particular compatibility matrix between + kubernetes library dependencies (client-go, apimachinery, etc); Such + compatibility is infeasible due to the way those libraries are versioned. + +[dep-versions]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md#kubernetes-version-compatibility diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/alias.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/alias.go new file mode 100644 index 000000000..29f964dcb --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/alias.go @@ -0,0 +1,150 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllerruntime + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client/config" + cfg "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Builder builds an Application ControllerManagedBy (e.g. Operator) and returns a manager.Manager to start it. +type Builder = builder.Builder + +// Request contains the information necessary to reconcile a Kubernetes object. This includes the +// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about +// any specific Event or the object contents itself. +type Request = reconcile.Request + +// Result contains the result of a Reconciler invocation. +type Result = reconcile.Result + +// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. +// A Manager is required to create Controllers. +type Manager = manager.Manager + +// Options are the arguments for creating a new Manager. +type Options = manager.Options + +// SchemeBuilder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. +type SchemeBuilder = scheme.Builder + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion = schema.GroupVersion + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types. 
+type GroupResource = schema.GroupResource + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false +type TypeMeta = metav1.TypeMeta + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta = metav1.ObjectMeta + +var ( + // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. + // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running + // in cluster and use the cluster provided kubeconfig. + // + // Will log an error and exit if there is an error creating the rest.Config. + GetConfigOrDie = config.GetConfigOrDie + + // GetConfig creates a *rest.Config for talking to a Kubernetes apiserver. + // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running + // in cluster and use the cluster provided kubeconfig. + // + // Config precedence + // + // * --kubeconfig flag pointing at a file + // + // * KUBECONFIG environment variable pointing at a file + // + // * In-cluster config if running in cluster + // + // * $HOME/.kube/config if exists. + GetConfig = config.GetConfig + + // ConfigFile returns the cfg.File function for deferred config file loading, + // this is passed into Options{}.From() to populate the Options fields for + // the manager. + ConfigFile = cfg.File + + // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager. + NewControllerManagedBy = builder.ControllerManagedBy + + // NewWebhookManagedBy returns a new webhook builder that will be started by the provided Manager. + NewWebhookManagedBy = builder.WebhookManagedBy + + // NewManager returns a new Manager for creating Controllers. 
+ NewManager = manager.New + + // CreateOrUpdate creates or updates the given object obj in the Kubernetes + // cluster. The object's desired state should be reconciled with the existing + // state using the passed in ReconcileFn. obj must be a struct pointer so that + // obj can be updated with the content returned by the Server. + // + // It returns the executed operation and an error. + CreateOrUpdate = controllerutil.CreateOrUpdate + + // SetControllerReference sets owner as a Controller OwnerReference on owned. + // This is used for garbage collection of the owned object and for + // reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner). + // Since only one OwnerReference can be a controller, it returns an error if + // there is another OwnerReference with Controller flag set. + SetControllerReference = controllerutil.SetControllerReference + + // SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned + // which is closed on one of these signals. If a second signal is caught, the program + // is terminated with exit code 1. + SetupSignalHandler = signals.SetupSignalHandler + + // Log is the base logger used by controller-runtime. It delegates + // to another logr.Logger. You *must* call SetLogger to + // get any actual logging. + Log = log.Log + + // LoggerFrom returns a logger with predefined values from a context.Context. + // The logger, when used with controllers, can be expected to contain basic information about the object + // that's being reconciled like: + // - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller. + // - `name` and `namespace` injected from the reconciliation request. + // + // This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface. + LoggerFrom = log.FromContext + + // LoggerInto takes a context and sets the logger as one of its keys. 
+ // + // This is meant to be used in reconcilers to enrich the logger within a context with additional values. + LoggerInto = log.IntoContext + + // SetLogger sets a concrete logging implementation for all deferred Loggers. + SetLogger = log.SetLogger +) diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md b/controlplane/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md new file mode 100644 index 000000000..0d15c00cf --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/doc.go new file mode 100644 index 000000000..9f260c4f7 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/doc.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package controllerruntime provides tools to construct Kubernetes-style +// controllers that manipulate both Kubernetes CRDs and aggregated/built-in +// Kubernetes APIs. +// +// It defines easy helpers for the common use cases when building CRDs, built +// on top of customizable layers of abstraction. Common cases should be easy, +// and uncommon cases should be possible. 
In general, controller-runtime tries +// to guide users towards Kubernetes controller best-practices. +// +// Getting Started +// +// The main entrypoint for controller-runtime is this root package, which +// contains all of the common types needed to get started building controllers: +// import ( +// ctrl "sigs.k8s.io/controller-runtime" +// ) +// +// The examples in this package walk through a basic controller setup. The +// kubebuilder book (https://book.kubebuilder.io) has some more in-depth +// walkthroughs. +// +// controller-runtime favors structs with sane defaults over constructors, so +// it's fairly common to see structs being used directly in controller-runtime. +// +// Organization +// +// A brief-ish walkthrough of the layout of this library can be found below. Each +// package contains more information about how to use it. +// +// Frequently asked questions about using controller-runtime and designing +// controllers can be found at +// https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md. +// +// Managers +// +// Every controller and webhook is ultimately run by a Manager (pkg/manager). A +// manager is responsible for running controllers and webhooks, and setting up +// common dependencies (pkg/runtime/inject), like shared caches and clients, as +// well as managing leader election (pkg/leaderelection). Managers are +// generally configured to gracefully shut down controllers on pod termination +// by wiring up a signal handler (pkg/manager/signals). +// +// Controllers +// +// Controllers (pkg/controller) use events (pkg/event) to eventually trigger +// reconcile requests. They may be constructed manually, but are often +// constructed with a Builder (pkg/builder), which eases the wiring of event +// sources (pkg/source), like Kubernetes API object changes, to event handlers +// (pkg/handler), like "enqueue a reconcile request for the object owner". 
+// Predicates (pkg/predicate) can be used to filter which events actually +// trigger reconciles. There are pre-written utilities for the common cases, and +// interfaces and helpers for advanced cases. +// +// Reconcilers +// +// Controller logic is implemented in terms of Reconcilers (pkg/reconcile). A +// Reconciler implements a function which takes a reconcile Request containing +// the name and namespace of the object to reconcile, reconciles the object, +// and returns a Response or an error indicating whether to requeue for a +// second round of processing. +// +// Clients and Caches +// +// Reconcilers use Clients (pkg/client) to access API objects. The default +// client provided by the manager reads from a local shared cache (pkg/cache) +// and writes directly to the API server, but clients can be constructed that +// only talk to the API server, without a cache. The Cache will auto-populate +// with watched objects, as well as when other structured objects are +// requested. The default split client does not promise to invalidate the cache +// during writes (nor does it promise sequential create/get coherence), and code +// should not assume a get immediately following a create/update will return +// the updated resource. Caches may also have indexes, which can be created via +// a FieldIndexer (pkg/client) obtained from the manager. Indexes can used to +// quickly and easily look up all objects with certain fields set. Reconcilers +// may retrieve event recorders (pkg/recorder) to emit events using the +// manager. +// +// Schemes +// +// Clients, Caches, and many other things in Kubernetes use Schemes +// (pkg/scheme) to associate Go types to Kubernetes API Kinds +// (Group-Version-Kinds, to be specific). +// +// Webhooks +// +// Similarly, webhooks (pkg/webhook/admission) may be implemented directly, but +// are often constructed using a builder (pkg/webhook/admission/builder). 
They +// are run via a server (pkg/webhook) which is managed by a Manager. +// +// Logging and Metrics +// +// Logging (pkg/log) in controller-runtime is done via structured logs, using a +// log set of interfaces called logr +// (https://pkg.go.dev/github.com/go-logr/logr). While controller-runtime +// provides easy setup for using Zap (https://go.uber.org/zap, pkg/log/zap), +// you can provide any implementation of logr as the base logger for +// controller-runtime. +// +// Metrics (pkg/metrics) provided by controller-runtime are registered into a +// controller-runtime-specific Prometheus metrics registry. The manager can +// serve these by an HTTP endpoint, and additional metrics may be registered to +// this Registry as normal. +// +// Testing +// +// You can easily build integration and unit tests for your controllers and +// webhooks using the test Environment (pkg/envtest). This will automatically +// stand up a copy of etcd and kube-apiserver, and provide the correct options +// to connect to the API server. It's designed to work well with the Ginkgo +// testing framework, but should work with any testing setup. +package controllerruntime diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go new file mode 100644 index 000000000..eedda4932 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go @@ -0,0 +1,335 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "fmt" + "strings" + + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// Supporting mocking out functions for testing. +var newController = controller.New +var getGvk = apiutil.GVKForObject + +// project represents other forms that the we can use to +// send/receive a given resource (metadata-only, unstructured, etc). +type objectProjection int + +const ( + // projectAsNormal doesn't change the object from the form given. + projectAsNormal objectProjection = iota + // projectAsMetadata turns this into an metadata-only watch. + projectAsMetadata +) + +// Builder builds a Controller. +type Builder struct { + forInput ForInput + ownsInput []OwnsInput + watchesInput []WatchesInput + mgr manager.Manager + globalPredicates []predicate.Predicate + ctrl controller.Controller + ctrlOptions controller.Options + name string +} + +// ControllerManagedBy returns a new controller builder that will be started by the provided Manager. +func ControllerManagedBy(m manager.Manager) *Builder { + return &Builder{mgr: m} +} + +// ForInput represents the information set by For method. 
+type ForInput struct { + object client.Object + predicates []predicate.Predicate + objectProjection objectProjection + err error +} + +// For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / +// update events by *reconciling the object*. +// This is the equivalent of calling +// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}). +func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder { + if blder.forInput.object != nil { + blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation") + return blder + } + input := ForInput{object: object} + for _, opt := range opts { + opt.ApplyToFor(&input) + } + + blder.forInput = input + return blder +} + +// OwnsInput represents the information set by Owns method. +type OwnsInput struct { + object client.Object + predicates []predicate.Predicate + objectProjection objectProjection +} + +// Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to +// create / delete / update events by *reconciling the owner object*. This is the equivalent of calling +// Watches(&source.Kind{Type: }, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}). +func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder { + input := OwnsInput{object: object} + for _, opt := range opts { + opt.ApplyToOwns(&input) + } + + blder.ownsInput = append(blder.ownsInput, input) + return blder +} + +// WatchesInput represents the information set by Watches method. +type WatchesInput struct { + src source.Source + eventhandler handler.EventHandler + predicates []predicate.Predicate + objectProjection objectProjection +} + +// Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using +// Owns or For instead of Watches directly. 
+// Specified predicates are registered only for given source. +func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { + input := WatchesInput{src: src, eventhandler: eventhandler} + for _, opt := range opts { + opt.ApplyToWatches(&input) + } + + blder.watchesInput = append(blder.watchesInput, input) + return blder +} + +// WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually +// trigger reconciliations. For example, filtering on whether the resource version has changed. +// Given predicate is added for all watched objects. +// Defaults to the empty list. +func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder { + blder.globalPredicates = append(blder.globalPredicates, p) + return blder +} + +// WithOptions overrides the controller options use in doController. Defaults to empty. +func (blder *Builder) WithOptions(options controller.Options) *Builder { + blder.ctrlOptions = options + return blder +} + +// WithLogConstructor overrides the controller options's LogConstructor. +func (blder *Builder) WithLogConstructor(logConstructor func(*reconcile.Request) logr.Logger) *Builder { + blder.ctrlOptions.LogConstructor = logConstructor + return blder +} + +// Named sets the name of the controller to the given name. The name shows up +// in metrics, among other things, and thus should be a prometheus compatible name +// (underscores and alphanumeric characters only). +// +// By default, controllers are named using the lowercase version of their kind. +func (blder *Builder) Named(name string) *Builder { + blder.name = name + return blder +} + +// Complete builds the Application Controller. +func (blder *Builder) Complete(r reconcile.Reconciler) error { + _, err := blder.Build(r) + return err +} + +// Build builds the Application Controller and returns the Controller it created. 
+func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, error) { + if r == nil { + return nil, fmt.Errorf("must provide a non-nil Reconciler") + } + if blder.mgr == nil { + return nil, fmt.Errorf("must provide a non-nil Manager") + } + if blder.forInput.err != nil { + return nil, blder.forInput.err + } + // Checking the reconcile type exist or not + if blder.forInput.object == nil { + return nil, fmt.Errorf("must provide an object for reconciliation") + } + + // Set the ControllerManagedBy + if err := blder.doController(r); err != nil { + return nil, err + } + + // Set the Watch + if err := blder.doWatch(); err != nil { + return nil, err + } + + return blder.ctrl, nil +} + +func (blder *Builder) project(obj client.Object, proj objectProjection) (client.Object, error) { + switch proj { + case projectAsNormal: + return obj, nil + case projectAsMetadata: + metaObj := &metav1.PartialObjectMetadata{} + gvk, err := getGvk(obj, blder.mgr.GetScheme()) + if err != nil { + return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err) + } + metaObj.SetGroupVersionKind(gvk) + return metaObj, nil + default: + panic(fmt.Sprintf("unexpected projection type %v on type %T, should not be possible since this is an internal field", proj, obj)) + } +} + +func (blder *Builder) doWatch() error { + // Reconcile type + typeForSrc, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} + hdler := &handler.EnqueueRequestForObject{} + allPredicates := append(blder.globalPredicates, blder.forInput.predicates...) 
+ if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { + return err + } + + // Watches the managed types + for _, own := range blder.ownsInput { + typeForSrc, err := blder.project(own.object, own.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} + hdler := &handler.EnqueueRequestForOwner{ + OwnerType: blder.forInput.object, + IsController: true, + } + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, own.predicates...) + if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { + return err + } + } + + // Do the watch requests + for _, w := range blder.watchesInput { + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, w.predicates...) + + // If the source of this watch is of type *source.Kind, project it. + if srckind, ok := w.src.(*source.Kind); ok { + typeForSrc, err := blder.project(srckind.Type, w.objectProjection) + if err != nil { + return err + } + srckind.Type = typeForSrc + } + + if err := blder.ctrl.Watch(w.src, w.eventhandler, allPredicates...); err != nil { + return err + } + } + return nil +} + +func (blder *Builder) getControllerName(gvk schema.GroupVersionKind) string { + if blder.name != "" { + return blder.name + } + return strings.ToLower(gvk.Kind) +} + +func (blder *Builder) doController(r reconcile.Reconciler) error { + globalOpts := blder.mgr.GetControllerOptions() + + ctrlOptions := blder.ctrlOptions + if ctrlOptions.Reconciler == nil { + ctrlOptions.Reconciler = r + } + + // Retrieve the GVK from the object we're reconciling + // to prepopulate logger information, and to optionally generate a default name. + gvk, err := getGvk(blder.forInput.object, blder.mgr.GetScheme()) + if err != nil { + return err + } + + // Setup concurrency. 
+ if ctrlOptions.MaxConcurrentReconciles == 0 { + groupKind := gvk.GroupKind().String() + + if concurrency, ok := globalOpts.GroupKindConcurrency[groupKind]; ok && concurrency > 0 { + ctrlOptions.MaxConcurrentReconciles = concurrency + } + } + + // Setup cache sync timeout. + if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout != nil { + ctrlOptions.CacheSyncTimeout = *globalOpts.CacheSyncTimeout + } + + controllerName := blder.getControllerName(gvk) + + // Setup the logger. + if ctrlOptions.LogConstructor == nil { + log := blder.mgr.GetLogger().WithValues( + "controller", controllerName, + "controllerGroup", gvk.Group, + "controllerKind", gvk.Kind, + ) + + lowerCamelCaseKind := strings.ToLower(gvk.Kind[:1]) + gvk.Kind[1:] + + ctrlOptions.LogConstructor = func(req *reconcile.Request) logr.Logger { + log := log + if req != nil { + log = log.WithValues( + lowerCamelCaseKind, klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + ) + } + return log + } + } + + // Build the controller and return. + blder.ctrl, err = newController(controllerName, blder.mgr, ctrlOptions) + return err +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go new file mode 100644 index 000000000..e4df1b709 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package builder wraps other controller-runtime libraries and exposes simple +// patterns for building common Controllers. +// +// Projects built with the builder package can trivially be rebased on top of the underlying +// packages if the project requires more customized behavior in the future. +package builder + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("builder") diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go new file mode 100644 index 000000000..c738ba7d1 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// {{{ "Functional" Option Interfaces + +// ForOption is some configuration that modifies options for a For request. +type ForOption interface { + // ApplyToFor applies this configuration to the given for input. + ApplyToFor(*ForInput) +} + +// OwnsOption is some configuration that modifies options for a owns request. +type OwnsOption interface { + // ApplyToOwns applies this configuration to the given owns input. 
+ ApplyToOwns(*OwnsInput) +} + +// WatchesOption is some configuration that modifies options for a watches request. +type WatchesOption interface { + // ApplyToWatches applies this configuration to the given watches options. + ApplyToWatches(*WatchesInput) +} + +// }}} + +// {{{ Multi-Type Options + +// WithPredicates sets the given predicates list. +func WithPredicates(predicates ...predicate.Predicate) Predicates { + return Predicates{ + predicates: predicates, + } +} + +// Predicates filters events before enqueuing the keys. +type Predicates struct { + predicates []predicate.Predicate +} + +// ApplyToFor applies this configuration to the given ForInput options. +func (w Predicates) ApplyToFor(opts *ForInput) { + opts.predicates = w.predicates +} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (w Predicates) ApplyToOwns(opts *OwnsInput) { + opts.predicates = w.predicates +} + +// ApplyToWatches applies this configuration to the given WatchesInput options. +func (w Predicates) ApplyToWatches(opts *WatchesInput) { + opts.predicates = w.predicates +} + +var _ ForOption = &Predicates{} +var _ OwnsOption = &Predicates{} +var _ WatchesOption = &Predicates{} + +// }}} + +// {{{ For & Owns Dual-Type options + +// asProjection configures the projection (currently only metadata) on the input. +// Currently only metadata is supported. We might want to expand +// this to arbitrary non-special local projections in the future. +type projectAs objectProjection + +// ApplyToFor applies this configuration to the given ForInput options. +func (p projectAs) ApplyToFor(opts *ForInput) { + opts.objectProjection = objectProjection(p) +} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (p projectAs) ApplyToOwns(opts *OwnsInput) { + opts.objectProjection = objectProjection(p) +} + +// ApplyToWatches applies this configuration to the given WatchesInput options. 
+func (p projectAs) ApplyToWatches(opts *WatchesInput) { + opts.objectProjection = objectProjection(p) +} + +var ( + // OnlyMetadata tells the controller to *only* cache metadata, and to watch + // the the API server in metadata-only form. This is useful when watching + // lots of objects, really big objects, or objects for which you only know + // the the GVK, but not the structure. You'll need to pass + // metav1.PartialObjectMetadata to the client when fetching objects in your + // reconciler, otherwise you'll end up with a duplicate structured or + // unstructured cache. + // + // When watching a resource with OnlyMetadata, for example the v1.Pod, you + // should not Get and List using the v1.Pod type. Instead, you should use + // the special metav1.PartialObjectMetadata type. + // + // ❌ Incorrect: + // + // pod := &v1.Pod{} + // mgr.GetClient().Get(ctx, nsAndName, pod) + // + // ✅ Correct: + // + // pod := &metav1.PartialObjectMetadata{} + // pod.SetGroupVersionKind(schema.GroupVersionKind{ + // Group: "", + // Version: "v1", + // Kind: "Pod", + // }) + // mgr.GetClient().Get(ctx, nsAndName, pod) + // + // In the first case, controller-runtime will create another cache for the + // concrete type on top of the metadata cache; this increases memory + // consumption and leads to race conditions as caches are not in sync. + OnlyMetadata = projectAs(projectAsMetadata) + + _ ForOption = OnlyMetadata + _ OwnsOption = OnlyMetadata + _ WatchesOption = OnlyMetadata +) + +// }}} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go new file mode 100644 index 000000000..18feb1cd7 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go @@ -0,0 +1,209 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "errors" + "net/http" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +// WebhookBuilder builds a Webhook. +type WebhookBuilder struct { + apiType runtime.Object + withDefaulter admission.CustomDefaulter + withValidator admission.CustomValidator + gvk schema.GroupVersionKind + mgr manager.Manager + config *rest.Config +} + +// WebhookManagedBy allows inform its manager.Manager. +func WebhookManagedBy(m manager.Manager) *WebhookBuilder { + return &WebhookBuilder{mgr: m} +} + +// TODO(droot): update the GoDoc for conversion. + +// For takes a runtime.Object which should be a CR. +// If the given object implements the admission.Defaulter interface, a MutatingWebhook will be wired for this type. +// If the given object implements the admission.Validator interface, a ValidatingWebhook will be wired for this type. +func (blder *WebhookBuilder) For(apiType runtime.Object) *WebhookBuilder { + blder.apiType = apiType + return blder +} + +// WithDefaulter takes a admission.WithDefaulter interface, a MutatingWebhook will be wired for this type. 
+func (blder *WebhookBuilder) WithDefaulter(defaulter admission.CustomDefaulter) *WebhookBuilder { + blder.withDefaulter = defaulter + return blder +} + +// WithValidator takes a admission.WithValidator interface, a ValidatingWebhook will be wired for this type. +func (blder *WebhookBuilder) WithValidator(validator admission.CustomValidator) *WebhookBuilder { + blder.withValidator = validator + return blder +} + +// Complete builds the webhook. +func (blder *WebhookBuilder) Complete() error { + // Set the Config + blder.loadRestConfig() + + // Set the Webhook if needed + return blder.registerWebhooks() +} + +func (blder *WebhookBuilder) loadRestConfig() { + if blder.config == nil { + blder.config = blder.mgr.GetConfig() + } +} + +func (blder *WebhookBuilder) registerWebhooks() error { + typ, err := blder.getType() + if err != nil { + return err + } + + // Create webhook(s) for each type + blder.gvk, err = apiutil.GVKForObject(typ, blder.mgr.GetScheme()) + if err != nil { + return err + } + + blder.registerDefaultingWebhook() + blder.registerValidatingWebhook() + + err = blder.registerConversionWebhook() + if err != nil { + return err + } + return nil +} + +// registerDefaultingWebhook registers a defaulting webhook if th. +func (blder *WebhookBuilder) registerDefaultingWebhook() { + mwh := blder.getDefaultingWebhook() + if mwh != nil { + path := generateMutatePath(blder.gvk) + + // Checking if the path is already registered. + // If so, just skip it. 
+ if !blder.isAlreadyHandled(path) { + log.Info("Registering a mutating webhook", + "GVK", blder.gvk, + "path", path) + blder.mgr.GetWebhookServer().Register(path, mwh) + } + } +} + +func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook { + if defaulter := blder.withDefaulter; defaulter != nil { + return admission.WithCustomDefaulter(blder.apiType, defaulter) + } + if defaulter, ok := blder.apiType.(admission.Defaulter); ok { + return admission.DefaultingWebhookFor(defaulter) + } + log.Info( + "skip registering a mutating webhook, object does not implement admission.Defaulter or WithDefaulter wasn't called", + "GVK", blder.gvk) + return nil +} + +func (blder *WebhookBuilder) registerValidatingWebhook() { + vwh := blder.getValidatingWebhook() + if vwh != nil { + path := generateValidatePath(blder.gvk) + + // Checking if the path is already registered. + // If so, just skip it. + if !blder.isAlreadyHandled(path) { + log.Info("Registering a validating webhook", + "GVK", blder.gvk, + "path", path) + blder.mgr.GetWebhookServer().Register(path, vwh) + } + } +} + +func (blder *WebhookBuilder) getValidatingWebhook() *admission.Webhook { + if validator := blder.withValidator; validator != nil { + return admission.WithCustomValidator(blder.apiType, validator) + } + if validator, ok := blder.apiType.(admission.Validator); ok { + return admission.ValidatingWebhookFor(validator) + } + log.Info( + "skip registering a validating webhook, object does not implement admission.Validator or WithValidator wasn't called", + "GVK", blder.gvk) + return nil +} + +func (blder *WebhookBuilder) registerConversionWebhook() error { + ok, err := conversion.IsConvertible(blder.mgr.GetScheme(), blder.apiType) + if err != nil { + log.Error(err, "conversion check failed", "GVK", blder.gvk) + return err + } + if ok { + if !blder.isAlreadyHandled("/convert") { + blder.mgr.GetWebhookServer().Register("/convert", &conversion.Webhook{}) + } + log.Info("Conversion webhook enabled", "GVK", 
blder.gvk) + } + + return nil +} + +func (blder *WebhookBuilder) getType() (runtime.Object, error) { + if blder.apiType != nil { + return blder.apiType, nil + } + return nil, errors.New("For() must be called with a valid object") +} + +func (blder *WebhookBuilder) isAlreadyHandled(path string) bool { + if blder.mgr.GetWebhookServer().WebhookMux == nil { + return false + } + h, p := blder.mgr.GetWebhookServer().WebhookMux.Handler(&http.Request{URL: &url.URL{Path: path}}) + if p == path && h != nil { + return true + } + return false +} + +func generateMutatePath(gvk schema.GroupVersionKind) string { + return "/mutate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} + +func generateValidatePath(gvk schema.GroupVersionKind) string { + return "/validate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go new file mode 100644 index 000000000..2a8f53347 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -0,0 +1,275 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("object-cache") + +// Cache knows how to load Kubernetes objects, fetch informers to request +// to receive events for Kubernetes objects (at a low-level), +// and add indices to fields on the objects stored in the cache. +type Cache interface { + // Cache acts as a client to objects stored in the cache. + client.Reader + + // Cache loads informers and adds field indices. + Informers +} + +// Informers knows how to create or fetch informers for different +// group-version-kinds, and add indices to those informers. It's safe to call +// GetInformer from multiple threads. +type Informers interface { + // GetInformer fetches or constructs an informer for the given object that corresponds to a single + // API kind and resource. + GetInformer(ctx context.Context, obj client.Object) (Informer, error) + + // GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead + // of the underlying object. + GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) + + // Start runs all the informers known to this cache until the context is closed. + // It blocks. + Start(ctx context.Context) error + + // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. + WaitForCacheSync(ctx context.Context) bool + + // Informers knows how to add indices to the caches (informers) that it manages. 
+ client.FieldIndexer +} + +// Informer - informer allows you interact with the underlying informer. +type Informer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + AddEventHandler(handler toolscache.ResourceEventHandler) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the + // specified resync period. Events to a single handler are delivered sequentially, but there is + // no coordination between different handlers. + AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(indexers toolscache.Indexers) error + // HasSynced return true if the informers underlying store has synced. + HasSynced() bool +} + +// ObjectSelector is an alias name of internal.Selector. +type ObjectSelector internal.Selector + +// SelectorsByObject associate a client.Object's GVK to a field/label selector. +// There is also `DefaultSelector` to set a global default (which will be overridden by +// a more specific setting here, if any). +type SelectorsByObject map[client.Object]ObjectSelector + +// Options are the optional arguments for creating a new InformersMap object. +type Options struct { + // Scheme is the scheme to use for mapping objects to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Resync is the base frequency the informers are resynced. + // Defaults to defaultResyncTime. + // A 10 percent jitter will be added to the Resync period between informers + // So that all informers will not send list requests simultaneously. 
+ Resync *time.Duration + + // Namespace restricts the cache's ListWatch to the desired namespace + // Default watches all namespaces + Namespace string + + // SelectorsByObject restricts the cache's ListWatch to the desired + // fields per GVK at the specified object, the map's value must implement + // Selector [1] using for example a Set [2] + // [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector + // [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set + SelectorsByObject SelectorsByObject + + // DefaultSelector will be used as selectors for all object types + // that do not have a selector in SelectorsByObject defined. + DefaultSelector ObjectSelector + + // UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or + // list objects per GVK at the specified object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + UnsafeDisableDeepCopyByObject DisableDeepCopyByObject + + // TransformByObject is a map from GVKs to transformer functions which + // get applied when objects of the transformation are about to be committed + // to cache. + // + // This function is called both for new objects to enter the cache, + // and for updated objects. + TransformByObject TransformByObject + + // DefaultTransform is the transform used for all GVKs which do + // not have an explicit transform func set in TransformByObject + DefaultTransform toolscache.TransformFunc +} + +var defaultResyncTime = 10 * time.Hour + +// New initializes and returns a new Cache. 
+func New(config *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } + selectorsByGVK, err := convertToSelectorsByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme) + if err != nil { + return nil, err + } + disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme) + if err != nil { + return nil, err + } + transformByGVK, err := convertToTransformByKindAndGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme) + if err != nil { + return nil, err + } + + im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK, disableDeepCopyByGVK, transformByGVK) + return &informerCache{InformersMap: im}, nil +} + +// BuilderWithOptions returns a Cache constructor that will build the a cache +// honoring the options argument, this is useful to specify options like +// SelectorsByObject +// WARNING: if SelectorsByObject is specified. filtered out resources are not +// returned. +// WARNING: if UnsafeDisableDeepCopy is enabled, you must DeepCopy any object +// returned from cache get/list before mutating it. 
+func BuilderWithOptions(options Options) NewCacheFunc { + return func(config *rest.Config, opts Options) (Cache, error) { + if options.Scheme == nil { + options.Scheme = opts.Scheme + } + if options.Mapper == nil { + options.Mapper = opts.Mapper + } + if options.Resync == nil { + options.Resync = opts.Resync + } + if options.Namespace == "" { + options.Namespace = opts.Namespace + } + if opts.Resync == nil { + opts.Resync = options.Resync + } + + return New(config, options) + } +} + +func defaultOpts(config *rest.Config, opts Options) (Options, error) { + // Use the default Kubernetes Scheme if unset + if opts.Scheme == nil { + opts.Scheme = scheme.Scheme + } + + // Construct a new Mapper if unset + if opts.Mapper == nil { + var err error + opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + if err != nil { + log.WithName("setup").Error(err, "Failed to get API Group-Resources") + return opts, fmt.Errorf("could not create RESTMapper from config") + } + } + + // Default the resync period to 10 hours if unset + if opts.Resync == nil { + opts.Resync = &defaultResyncTime + } + return opts, nil +} + +func convertToSelectorsByGVK(selectorsByObject SelectorsByObject, defaultSelector ObjectSelector, scheme *runtime.Scheme) (internal.SelectorsByGVK, error) { + selectorsByGVK := internal.SelectorsByGVK{} + for object, selector := range selectorsByObject { + gvk, err := apiutil.GVKForObject(object, scheme) + if err != nil { + return nil, err + } + selectorsByGVK[gvk] = internal.Selector(selector) + } + selectorsByGVK[schema.GroupVersionKind{}] = internal.Selector(defaultSelector) + return selectorsByGVK, nil +} + +// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache. +type DisableDeepCopyByObject map[client.Object]bool + +var _ client.Object = &ObjectAll{} + +// ObjectAll is the argument to represent all objects' types. 
+type ObjectAll struct { + client.Object +} + +func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) { + disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{} + for obj, disable := range disableDeepCopyByObject { + switch obj.(type) { + case ObjectAll, *ObjectAll: + disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable + default: + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return nil, err + } + disableDeepCopyByGVK[gvk] = disable + } + } + return disableDeepCopyByGVK, nil +} + +// TransformByObject associate a client.Object's GVK to a transformer function +// to be applied when storing the object into the cache. +type TransformByObject map[client.Object]toolscache.TransformFunc + +func convertToTransformByKindAndGVK(t TransformByObject, defaultTransform toolscache.TransformFunc, scheme *runtime.Scheme) (internal.TransformFuncByObject, error) { + result := internal.NewTransformFuncByObject() + for obj, transformation := range t { + if err := result.Set(obj, scheme, transformation); err != nil { + return nil, err + } + } + result.SetDefault(defaultTransform) + return result, nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go new file mode 100644 index 000000000..e1742ac0f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache provides object caches that act as caching client.Reader +// instances and help drive Kubernetes-object-based event handlers. +package cache diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go new file mode 100644 index 000000000..ac07be2a6 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -0,0 +1,217 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "fmt" + "reflect" + "strings" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +var ( + _ Informers = &informerCache{} + _ client.Reader = &informerCache{} + _ Cache = &informerCache{} +) + +// ErrCacheNotStarted is returned when trying to read from the cache that wasn't started. +type ErrCacheNotStarted struct{} + +func (*ErrCacheNotStarted) Error() string { + return "the cache is not started, can not read objects" +} + +// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +type informerCache struct { + *internal.InformersMap +} + +// Get implements Reader. +func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object) error { + gvk, err := apiutil.GVKForObject(out, ip.Scheme) + if err != nil { + return err + } + + started, cache, err := ip.InformersMap.Get(ctx, gvk, out) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + return cache.Reader.Get(ctx, key, out) +} + +// List implements Reader. +func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) + if err != nil { + return err + } + + started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + + return cache.Reader.List(ctx, out, opts...) +} + +// objectTypeForListObject tries to find the runtime.Object and associated GVK +// for a single object corresponding to the passed-in list type. 
We need them +// because they are used as cache map key. +func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ip.Scheme) + if err != nil { + return nil, nil, err + } + + // we need the non-list GVK, so chop off the "List" from the end of the kind + if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + _, isUnstructured := list.(*unstructured.UnstructuredList) + var cacheTypeObj runtime.Object + if isUnstructured { + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + cacheTypeObj = u + } else { + itemsPtr, err := apimeta.GetItemsPtr(list) + if err != nil { + return nil, nil, err + } + // http://knowyourmeme.com/memes/this-is-fine + elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() + if elemType.Kind() != reflect.Ptr { + elemType = reflect.PtrTo(elemType) + } + + cacheTypeValue := reflect.Zero(elemType) + var ok bool + cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) + if !ok { + return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface()) + } + } + + return &gvk, cacheTypeObj, nil +} + +// GetInformerForKind returns the informer for the GroupVersionKind. +func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { + // Map the gvk to an object + obj, err := ip.Scheme.New(gvk) + if err != nil { + return nil, err + } + + _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + if err != nil { + return nil, err + } + return i.Informer, err +} + +// GetInformer returns the informer for the obj. 
+func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ip.Scheme) + if err != nil { + return nil, err + } + + _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + if err != nil { + return nil, err + } + return i.Informer, err +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface +// to indicate that this can be started without requiring the leader lock. +func (ip *informerCache) NeedLeaderElection() bool { + return false +} + +// IndexField adds an indexer to the underlying cache, using extraction function to get +// value(s) from the given field. This index can then be used by passing a field selector +// to List. For one-to-one compatibility with "normal" field selectors, only return one value. +// The values may be anything. They will automatically be prefixed with the namespace of the +// given object, if present. The objects passed are guaranteed to be objects of the correct type. +func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ip.GetInformer(ctx, obj) + if err != nil { + return err + } + return indexByField(informer, field, extractValue) +} + +func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { + indexFunc := func(objRaw interface{}) ([]string, error) { + // TODO(directxman12): check if this is the correct type? 
+ obj, isObj := objRaw.(client.Object) + if !isObj { + return nil, fmt.Errorf("object of type %T is not an Object", objRaw) + } + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + ns := meta.GetNamespace() + + rawVals := extractor(obj) + var vals []string + if ns == "" { + // if we're not doubling the keys for the namespaced case, just create a new slice with same length + vals = make([]string, len(rawVals)) + } else { + // if we need to add non-namespaced versions too, double the length + vals = make([]string, len(rawVals)*2) + } + for i, rawVal := range rawVals { + // save a namespaced variant, so that we can ask + // "what are all the object matching a given index *in a given namespace*" + vals[i] = internal.KeyToNamespacedKey(ns, rawVal) + if ns != "" { + // if we have a namespace, also inject a special index key for listing + // regardless of the object namespace + vals[i+len(rawVals)] = internal.KeyToNamespacedKey("", rawVal) + } + } + + return vals, nil + } + + return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go new file mode 100644 index 000000000..b95af18d7 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -0,0 +1,218 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/client-go/tools/cache"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// CacheReader is a client.Reader.
+var _ client.Reader = &CacheReader{}
+
+// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type.
+// It serves Get and List requests from an in-memory indexer instead of the API server.
+type CacheReader struct {
+	// indexer is the underlying indexer wrapped by this cache.
+	indexer cache.Indexer
+
+	// groupVersionKind is the group-version-kind of the resource.
+	groupVersionKind schema.GroupVersionKind
+
+	// scopeName is the scope of the resource (namespaced or cluster-scoped).
+	scopeName apimeta.RESTScopeName
+
+	// disableDeepCopy indicates not to deep copy objects during get or list objects.
+	// Be very careful with this, when enabled you must DeepCopy any object before mutating it,
+	// otherwise you will mutate the object in the cache.
+	disableDeepCopy bool
+}
+
+// Get checks the indexer for the object and writes a copy of it if found.
+func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object) error {
+	if c.scopeName == apimeta.RESTScopeNameRoot {
+		// Cluster-scoped resources are stored under name-only keys.
+		key.Namespace = ""
+	}
+	storeKey := objectKeyToStoreKey(key)
+
+	// Lookup the object from the indexer cache
+	obj, exists, err := c.indexer.GetByKey(storeKey)
+	if err != nil {
+		return err
+	}
+
+	// Not found, return an error
+	if !exists {
+		// Resource gets transformed into Kind in the error anyway, so this is fine
+		return apierrors.NewNotFound(schema.GroupResource{
+			Group:    c.groupVersionKind.Group,
+			Resource: c.groupVersionKind.Kind,
+		}, key.Name)
+	}
+
+	// Verify the result is a runtime.Object
+	if _, isObj := obj.(runtime.Object); !isObj {
+		// This should never happen
+		return fmt.Errorf("cache contained %T, which is not an Object", obj)
+	}
+
+	if c.disableDeepCopy {
+		// skip deep copy which might be unsafe
+		// you must DeepCopy any object before mutating it outside
+	} else {
+		// deep copy to avoid mutating cache
+		obj = obj.(runtime.Object).DeepCopyObject()
+	}
+
+	// Copy the value of the item in the cache to the returned value
+	// TODO(directxman12): this is a terrible hack, pls fix (we should have deepcopyinto)
+	outVal := reflect.ValueOf(out)
+	objVal := reflect.ValueOf(obj)
+	if !objVal.Type().AssignableTo(outVal.Type()) {
+		return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type())
+	}
+	reflect.Indirect(outVal).Set(reflect.Indirect(objVal))
+	if !c.disableDeepCopy {
+		// Only set the GVK on our private deep copy; with deep copy disabled the
+		// object is shared with the cache and must not be mutated here.
+		out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
+	}
+
+	return nil
+}
+
+// List lists items out of the indexer and writes them to out.
+func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error {
+	var objs []interface{}
+	var err error
+
+	listOpts := client.ListOptions{}
+	listOpts.ApplyOptions(opts)
+
+	// Pick the cheapest candidate set: a field index, the namespace index,
+	// or the whole store; label selection and Limit are applied afterwards.
+	switch {
+	case listOpts.FieldSelector != nil:
+		// TODO(directxman12): support more complicated field selectors by
+		// combining multiple indices, GetIndexers, etc
+		field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector)
+		if !requiresExact {
+			return fmt.Errorf("non-exact field matches are not supported by the cache")
+		}
+		// list all objects by the field selector. If this is namespaced and we have one, ask for the
+		// namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces"
+		// namespace.
+		objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val))
+	case listOpts.Namespace != "":
+		objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace)
+	default:
+		objs = c.indexer.List()
+	}
+	if err != nil {
+		return err
+	}
+	var labelSel labels.Selector
+	if listOpts.LabelSelector != nil {
+		labelSel = listOpts.LabelSelector
+	}
+
+	limitSet := listOpts.Limit > 0
+
+	runtimeObjs := make([]runtime.Object, 0, len(objs))
+	for _, item := range objs {
+		// if the Limit option is set and the number of items
+		// listed exceeds this limit, then stop reading.
+		if limitSet && int64(len(runtimeObjs)) >= listOpts.Limit {
+			break
+		}
+		obj, isObj := item.(runtime.Object)
+		if !isObj {
+			return fmt.Errorf("cache contained %T, which is not an Object", obj)
+		}
+		meta, err := apimeta.Accessor(obj)
+		if err != nil {
+			return err
+		}
+		if labelSel != nil {
+			lbls := labels.Set(meta.GetLabels())
+			if !labelSel.Matches(lbls) {
+				continue
+			}
+		}
+
+		var outObj runtime.Object
+		if c.disableDeepCopy {
+			// skip deep copy which might be unsafe
+			// you must DeepCopy any object before mutating it outside
+			outObj = obj
+		} else {
+			outObj = obj.DeepCopyObject()
+			outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
+		}
+		runtimeObjs = append(runtimeObjs, outObj)
+	}
+	return apimeta.SetList(out, runtimeObjs)
+}
+
+// objectKeyToStorageKey converts an object key to store key.
+// It's akin to MetaNamespaceKeyFunc. It's separate from
+// String to allow keeping the key format easily in sync with
+// MetaNamespaceKeyFunc.
+func objectKeyToStoreKey(k client.ObjectKey) string {
+	if k.Namespace == "" {
+		return k.Name
+	}
+	return k.Namespace + "/" + k.Name
+}
+
+// requiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`.
+// Only single-requirement, equality-based selectors can be served from the field index.
+func requiresExactMatch(sel fields.Selector) (field, val string, required bool) {
+	reqs := sel.Requirements()
+	if len(reqs) != 1 {
+		return "", "", false
+	}
+	req := reqs[0]
+	if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals {
+		return "", "", false
+	}
+	return req.Field, req.Value, true
+}
+
+// FieldIndexName constructs the name of the index over the given field,
+// for use with an indexer.
+func FieldIndexName(field string) string {
+	return "field:" + field
+}
+
+// allNamespacesNamespace is used as the "namespace" when we want to list across all namespaces.
+const allNamespacesNamespace = "__all_namespaces"
+
+// KeyToNamespacedKey prefixes the given index key with a namespace
+// for use in field selector indexes.
+func KeyToNamespacedKey(ns string, baseKey string) string {
+	if ns != "" {
+		return ns + "/" + baseKey
+	}
+	// Empty namespace selects the cross-namespace variant of the index.
+	return allNamespacesNamespace + "/" + baseKey
+}
diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go
new file mode 100644
index 000000000..27f46e327
--- /dev/null
+++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"context"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
+)
+
+// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs.
+// It uses a standard parameter codec constructed based on the given generated Scheme.
+type InformersMap struct {
+	// we abstract over the details of structured/unstructured/metadata with the specificInformerMaps
+	// TODO(directxman12): genericize this over different projections now that we have 3 different maps
+
+	structured   *specificInformersMap
+	unstructured *specificInformersMap
+	metadata     *specificInformersMap
+
+	// Scheme maps runtime.Objects to GroupVersionKinds
+	Scheme *runtime.Scheme
+}
+
+// NewInformersMap creates a new InformersMap that can create informers for
+// both structured and unstructured objects.
+func NewInformersMap(config *rest.Config,
+	scheme *runtime.Scheme,
+	mapper meta.RESTMapper,
+	resync time.Duration,
+	namespace string,
+	selectors SelectorsByGVK,
+	disableDeepCopy DisableDeepCopyByGVK,
+	transformers TransformFuncByObject,
+) *InformersMap {
+	return &InformersMap{
+		structured:   newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
+		unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
+		metadata:     newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
+
+		Scheme: scheme,
+	}
+}
+
+// Start calls Run on each of the informers and sets started to true. Blocks on the context.
+func (m *InformersMap) Start(ctx context.Context) error {
+	// Each delegate blocks until ctx is done, so run them in goroutines
+	// and block here on the same context.
+	go m.structured.Start(ctx)
+	go m.unstructured.Start(ctx)
+	go m.metadata.Start(ctx)
+	<-ctx.Done()
+	return nil
+}
+
+// WaitForCacheSync waits until all the caches have been started and synced.
+func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool {
+	syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...)
+	syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...)
+	syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...)
+
+	// Wait for each delegate map's Start to have run before polling HasSynced,
+	// otherwise the informers may not be running yet.
+	if !m.structured.waitForStarted(ctx) {
+		return false
+	}
+	if !m.unstructured.waitForStarted(ctx) {
+		return false
+	}
+	if !m.metadata.waitForStarted(ctx) {
+		return false
+	}
+	return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...)
+}
+
+// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns
+// the Informer from the map.
+func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
+	// Route by the concrete object type to the matching projection map.
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return m.unstructured.Get(ctx, gvk, obj)
+	case *unstructured.UnstructuredList:
+		return m.unstructured.Get(ctx, gvk, obj)
+	case *metav1.PartialObjectMetadata:
+		return m.metadata.Get(ctx, gvk, obj)
+	case *metav1.PartialObjectMetadataList:
+		return m.metadata.Get(ctx, gvk, obj)
+	default:
+		return m.structured.Get(ctx, gvk, obj)
+	}
+}
+
+// newStructuredInformersMap creates a new InformersMap for structured objects.
+func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
+	namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch)
+}
+
+// newUnstructuredInformersMap creates a new InformersMap for unstructured objects.
+func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
+	namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch)
+}
+
+// newMetadataInformersMap creates a new InformersMap for metadata-only objects.
+func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
+	namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch)
+}
diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go
new file mode 100644
index 000000000..54bd7eec9
--- /dev/null
+++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+// GroupVersionKindAll is the argument to represent all GroupVersionKind types.
+var GroupVersionKindAll = schema.GroupVersionKind{}
+
+// DisableDeepCopyByGVK associate a GroupVersionKind to disable DeepCopy during get or list from cache.
+type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool
+
+// IsDisabled returns whether a GroupVersionKind is disabled DeepCopy.
+func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool {
+	// A specific entry wins over the GroupVersionKindAll wildcard entry.
+	if d, ok := disableByGVK[gvk]; ok {
+		return d
+	} else if d, ok = disableByGVK[GroupVersionKindAll]; ok {
+		return d
+	}
+	return false
+}
diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go
new file mode 100644
index 000000000..1524d2316
--- /dev/null
+++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go
@@ -0,0 +1,480 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"sync"
+	"time"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/metadata"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+func init() {
+	// Seed the global PRNG used by resyncPeriod's jitter below.
+	rand.Seed(time.Now().UnixNano())
+}
+
+// clientListWatcherFunc knows how to create a ListWatcher.
+type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error)
+
+// newSpecificInformersMap returns a new specificInformersMap (like
+// the generical InformersMap, except that it doesn't implement WaitForCacheSync).
+func newSpecificInformersMap(config *rest.Config,
+	scheme *runtime.Scheme,
+	mapper meta.RESTMapper,
+	resync time.Duration,
+	namespace string,
+	selectors SelectorsByGVK,
+	disableDeepCopy DisableDeepCopyByGVK,
+	transformers TransformFuncByObject,
+	createListWatcher createListWatcherFunc,
+) *specificInformersMap {
+	ip := &specificInformersMap{
+		config:            config,
+		Scheme:            scheme,
+		mapper:            mapper,
+		informersByGVK:    make(map[schema.GroupVersionKind]*MapEntry),
+		codecs:            serializer.NewCodecFactory(scheme),
+		paramCodec:        runtime.NewParameterCodec(scheme),
+		resync:            resync,
+		startWait:         make(chan struct{}),
+		createListWatcher: createListWatcher,
+		namespace:         namespace,
+		selectors:         selectors.forGVK,
+		disableDeepCopy:   disableDeepCopy,
+		transformers:      transformers,
+	}
+	return ip
+}
+
+// MapEntry contains the cached data for an Informer.
+type MapEntry struct {
+	// Informer is the cached informer
+	Informer cache.SharedIndexInformer
+
+	// CacheReader wraps Informer and implements the CacheReader interface for a single type
+	Reader CacheReader
+}
+
+// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs.
+// It uses a standard parameter codec constructed based on the given generated Scheme.
+type specificInformersMap struct {
+	// Scheme maps runtime.Objects to GroupVersionKinds
+	Scheme *runtime.Scheme
+
+	// config is used to talk to the apiserver
+	config *rest.Config
+
+	// mapper maps GroupVersionKinds to Resources
+	mapper meta.RESTMapper
+
+	// informersByGVK is the cache of informers keyed by groupVersionKind
+	informersByGVK map[schema.GroupVersionKind]*MapEntry
+
+	// codecs is used to create a new REST client
+	codecs serializer.CodecFactory
+
+	// paramCodec is used by list and watch
+	paramCodec runtime.ParameterCodec
+
+	// stop is the stop channel to stop informers
+	stop <-chan struct{}
+
+	// resync is the base frequency the informers are resynced
+	// a 10 percent jitter will be added to the resync period between informers
+	// so that all informers will not send list requests simultaneously.
+	resync time.Duration
+
+	// mu guards access to the map
+	mu sync.RWMutex
+
+	// started is true if the informers have been started
+	started bool
+
+	// startWait is a channel that is closed after the
+	// informer has been started.
+	startWait chan struct{}
+
+	// createListWatcher knows how to create a client and a list object,
+	// and allows for abstracting over the particulars of structured vs
+	// unstructured objects.
+	createListWatcher createListWatcherFunc
+
+	// namespace is the namespace that all ListWatches are restricted to
+	// default or empty string means all namespaces
+	namespace string
+
+	// selectors are the label or field selectors that will be added to the
+	// ListWatch ListOptions.
+	selectors func(gvk schema.GroupVersionKind) Selector
+
+	// disableDeepCopy indicates not to deep copy objects during get or list objects.
+	disableDeepCopy DisableDeepCopyByGVK
+
+	// transform funcs are applied to objects before they are committed to the cache
+	transformers TransformFuncByObject
+}
+
+// Start calls Run on each of the informers and sets started to true. Blocks on the context.
+// It doesn't return start because it can't return an error, and it's not a runnable directly.
+func (ip *specificInformersMap) Start(ctx context.Context) {
+	func() {
+		ip.mu.Lock()
+		defer ip.mu.Unlock()
+
+		// Set the stop channel so it can be passed to informers that are added later
+		ip.stop = ctx.Done()
+
+		// Start each informer
+		for _, informer := range ip.informersByGVK {
+			go informer.Informer.Run(ctx.Done())
+		}
+
+		// Set started to true so we immediately start any informers added later.
+		ip.started = true
+		close(ip.startWait)
+	}()
+	<-ctx.Done()
+}
+
+// waitForStarted blocks until Start has run (startWait is closed) or ctx is done,
+// reporting whether the map actually started.
+func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool {
+	select {
+	case <-ip.startWait:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
+
+// HasSyncedFuncs returns all the HasSynced functions for the informers in this map.
+func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced {
+	ip.mu.RLock()
+	defer ip.mu.RUnlock()
+	syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK))
+	for _, informer := range ip.informersByGVK {
+		syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced)
+	}
+	return syncedFuncs
+}
+
+// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns
+// the Informer from the map.
+func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
+	// Return the informer if it is found
+	i, started, ok := func() (*MapEntry, bool, bool) {
+		ip.mu.RLock()
+		defer ip.mu.RUnlock()
+		i, ok := ip.informersByGVK[gvk]
+		return i, ip.started, ok
+	}()
+
+	if !ok {
+		var err error
+		if i, started, err = ip.addInformerToMap(gvk, obj); err != nil {
+			return started, nil, err
+		}
+	}
+
+	if started && !i.Informer.HasSynced() {
+		// Wait for it to sync before returning the Informer so that folks don't read from a stale cache.
+		if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) {
+			return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0)
+		}
+	}
+
+	return started, i, nil
+}
+
+// addInformerToMap creates the informer for gvk under the write lock, registering
+// the transform func and a CacheReader, and runs it immediately if the map has
+// already been started.
+func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) {
+	ip.mu.Lock()
+	defer ip.mu.Unlock()
+
+	// Check the cache to see if we already have an Informer. If we do, return the Informer.
+	// This is for the case where 2 routines tried to get the informer when it wasn't in the map
+	// so neither returned early, but the first one created it.
+	if i, ok := ip.informersByGVK[gvk]; ok {
+		return i, ip.started, nil
+	}
+
+	// Create a NewSharedIndexInformer and add it to the map.
+	var lw *cache.ListWatch
+	lw, err := ip.createListWatcher(gvk, ip)
+	if err != nil {
+		return nil, false, err
+	}
+	ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{
+		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
+	})
+
+	// Check to see if there is a transformer for this gvk
+	if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil {
+		return nil, false, err
+	}
+
+	rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, false, err
+	}
+
+	i := &MapEntry{
+		Informer: ni,
+		Reader: CacheReader{
+			indexer:          ni.GetIndexer(),
+			groupVersionKind: gvk,
+			scopeName:        rm.Scope.Name(),
+			disableDeepCopy:  ip.disableDeepCopy.IsDisabled(gvk),
+		},
+	}
+	ip.informersByGVK[gvk] = i
+
+	// Start the Informer if need by
+	// TODO(seans): write thorough tests and document what happens here - can you add indexers?
+	// can you add eventhandlers?
+	if ip.started {
+		go i.Informer.Run(ip.stop)
+	}
+	return i, ip.started, nil
+}
+
+// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer.
+func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
+	// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
+	// groupVersionKind to the Resource API we will use.
+	mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs)
+	if err != nil {
+		return nil, err
+	}
+	listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List")
+	listObj, err := ip.Scheme.New(listGVK)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: the functions that make use of this ListWatch should be adapted to
+	// pass in their own contexts instead of relying on this fixed one here.
+	ctx := context.TODO()
+	// Create a new ListWatch for the obj
+	return &cache.ListWatch{
+		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			ip.selectors(gvk).ApplyToList(&opts)
+			// DeepCopy the shared list prototype so concurrent lists don't share a result object.
+			res := listObj.DeepCopyObject()
+			namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
+			isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
+			err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res)
+			return res, err
+		},
+		// Setup the watch function
+		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			ip.selectors(gvk).ApplyToList(&opts)
+			// Watch needs to be set to true separately
+			opts.Watch = true
+			namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
+			isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
+			return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx)
+		},
+	}, nil
+}
+
+// createUnstructuredListWatch returns a ListWatch backed by the dynamic client,
+// for caches serving unstructured.Unstructured objects.
+func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
+	// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
+	// groupVersionKind to the Resource API we will use.
+	mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the rest configuration has a negotiated serializer passed in,
+	// we should remove it and use the one that the dynamic client sets for us.
+	cfg := rest.CopyConfig(ip.config)
+	cfg.NegotiatedSerializer = nil
+	dynamicClient, err := dynamic.NewForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: the functions that make use of this ListWatch should be adapted to
+	// pass in their own contexts instead of relying on this fixed one here.
+	ctx := context.TODO()
+	// Create a new ListWatch for the obj
+	return &cache.ListWatch{
+		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			ip.selectors(gvk).ApplyToList(&opts)
+			namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
+			if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts)
+			}
+			return dynamicClient.Resource(mapping.Resource).List(ctx, opts)
+		},
+		// Setup the watch function
+		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			ip.selectors(gvk).ApplyToList(&opts)
+			// Watch needs to be set to true separately
+			opts.Watch = true
+			namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
+			if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts)
+			}
+			return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts)
+		},
+	}, nil
+}
+
+// createMetadataListWatch returns a ListWatch backed by the metadata client,
+// for caches serving metav1.PartialObjectMetadata objects.
+func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
+	// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
+	// groupVersionKind to the Resource API we will use.
+	mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, err
+	}
+
+	// Always clear the negotiated serializer and use the one
+	// set from the metadata client.
+	cfg := rest.CopyConfig(ip.config)
+	cfg.NegotiatedSerializer = nil
+
+	// grab the metadata client
+	client, err := metadata.NewForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: the functions that make use of this ListWatch should be adapted to
+	// pass in their own contexts instead of relying on this fixed one here.
+	ctx := context.TODO()
+
+	// create the relevant listwatch
+	return &cache.ListWatch{
+		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			ip.selectors(gvk).ApplyToList(&opts)
+
+			var (
+				list *metav1.PartialObjectMetadataList
+				err  error
+			)
+			namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
+			if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts)
+			} else {
+				list, err = client.Resource(mapping.Resource).List(ctx, opts)
+			}
+			if list != nil {
+				// Restore the GVK that the metadata client strips from each item.
+				for i := range list.Items {
+					list.Items[i].SetGroupVersionKind(gvk)
+				}
+			}
+			return list, err
+		},
+		// Setup the watch function
+		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			ip.selectors(gvk).ApplyToList(&opts)
+			// Watch needs to be set to true separately
+			opts.Watch = true
+
+			var (
+				watcher watch.Interface
+				err     error
+			)
+			namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
+			if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts)
+			} else {
+				watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts)
+			}
+			if watcher != nil {
+				// Wrap to restore the GVK on every watch event (see newGVKFixupWatcher).
+				watcher = newGVKFixupWatcher(gvk, watcher)
+			}
+			return watcher, err
+		},
+	}, nil
+}
+
+// newGVKFixupWatcher adds a wrapper that preserves the GVK information when
+// events come in.
+//
+// This works around a bug where GVK information is not passed into mapping
+// functions when using the OnlyMetadata option in the builder.
+// This issue is most likely caused by kubernetes/kubernetes#80609.
+// See kubernetes-sigs/controller-runtime#1484.
+//
+// This was originally implemented as a cache.ResourceEventHandler wrapper but
+// that contained a data race which was resolved by setting the GVK in a watch
+// wrapper, before the objects are written to the cache.
+// See kubernetes-sigs/controller-runtime#1650.
+//
+// The original watch wrapper was found to be incompatible with
+// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a
+// watch.Filter which is compatible.
+// See kubernetes-sigs/controller-runtime#1789.
+func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface {
+	return watch.Filter(
+		watcher,
+		func(in watch.Event) (watch.Event, bool) {
+			// Stamp the GVK onto the event's object; never drop events.
+			in.Object.GetObjectKind().SetGroupVersionKind(gvk)
+			return in, true
+		},
+	)
+}
+
+// resyncPeriod returns a function which generates a duration each time it is
+// invoked; this is so that multiple controllers don't get into lock-step and all
+// hammer the apiserver with list requests simultaneously.
+func resyncPeriod(resync time.Duration) func() time.Duration {
+	return func() time.Duration {
+		// the factor will fall into [0.9, 1.1)
+		factor := rand.Float64()/5.0 + 0.9 //nolint:gosec
+		return time.Duration(float64(resync.Nanoseconds()) * factor)
+	}
+}
+
+// restrictNamespaceBySelector returns either a global restriction for all ListWatches
+// if not default/empty, or the namespace that a ListWatch for the specific resource
+// is restricted to, based on a specified field selector for metadata.namespace field.
+func restrictNamespaceBySelector(namespaceOpt string, s Selector) string {
+	if namespaceOpt != "" {
+		// namespace is already restricted
+		return namespaceOpt
+	}
+	fieldSelector := s.Field
+	if fieldSelector == nil || fieldSelector.Empty() {
+		return ""
+	}
+	// check whether a selector includes the namespace field
+	value, found := fieldSelector.RequiresExactMatch("metadata.namespace")
+	if found {
+		return value
+	}
+	return ""
+}
diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go
new file mode 100644
index 000000000..4eff32fb3
--- /dev/null
+++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SelectorsByGVK associate a GroupVersionKind to a field/label selector.
+type SelectorsByGVK map[schema.GroupVersionKind]Selector + +func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector { + if specific, found := s[gvk]; found { + return specific + } + if defaultSelector, found := s[schema.GroupVersionKind{}]; found { + return defaultSelector + } + + return Selector{} +} + +// Selector specify the label/field selector to fill in ListOptions. +type Selector struct { + Label labels.Selector + Field fields.Selector +} + +// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed. +func (s Selector) ApplyToList(listOpts *metav1.ListOptions) { + if s.Label != nil { + listOpts.LabelSelector = s.Label.String() + } + if s.Field != nil { + listOpts.FieldSelector = s.Field.String() + } +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go new file mode 100644 index 000000000..8cf642c4b --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go @@ -0,0 +1,50 @@ +package internal + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// TransformFuncByObject provides access to the correct transform function for +// any given GVK. +type TransformFuncByObject interface { + Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error + Get(schema.GroupVersionKind) cache.TransformFunc + SetDefault(transformer cache.TransformFunc) +} + +type transformFuncByGVK struct { + defaultTransform cache.TransformFunc + transformers map[schema.GroupVersionKind]cache.TransformFunc +} + +// NewTransformFuncByObject creates a new TransformFuncByObject instance. 
+func NewTransformFuncByObject() TransformFuncByObject { + return &transformFuncByGVK{ + transformers: make(map[schema.GroupVersionKind]cache.TransformFunc), + defaultTransform: nil, + } +} + +func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) { + t.defaultTransform = transformer +} + +func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return err + } + + t.transformers[gvk] = transformer + return nil +} + +func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc { + if val, ok := t.transformers[gvk]; ok { + return val + } + return t.defaultTransform +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go new file mode 100644 index 000000000..47a5bb3b3 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -0,0 +1,331 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) + +// a new global namespaced cache to handle cluster scoped resources. +const globalCache = "_cluster-scope" + +// MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. +// This will scope the cache to a list of namespaces. Listing for all namespaces +// will list for all the namespaces that this knows about. By default this will create +// a global cache for cluster scoped resource. Note that this is not intended +// to be used for excluding namespaces, this is better done via a Predicate. Also note that +// you may face performance issues when using this with a high number of namespaces. 
+func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { + return func(config *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } + + caches := map[string]Cache{} + + // create a cache for cluster scoped resources + gCache, err := New(config, opts) + if err != nil { + return nil, fmt.Errorf("error creating global cache: %w", err) + } + + for _, ns := range namespaces { + opts.Namespace = ns + c, err := New(config, opts) + if err != nil { + return nil, err + } + caches[ns] = c + } + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + } +} + +// multiNamespaceCache knows how to handle multiple namespaced caches +// Use this feature when scoping permissions for your +// operator to a list of namespaces instead of watching every namespace +// in the cluster. +type multiNamespaceCache struct { + namespaceToCache map[string]Cache + Scheme *runtime.Scheme + RESTMapper apimeta.RESTMapper + clusterCache Cache +} + +var _ Cache = &multiNamespaceCache{} + +// Methods for multiNamespaceCache to conform to the Informers interface. +func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + informers := map[string]Informer{} + + // If the object is clusterscoped, get the informer from clusterCache, + // if not use the namespaced caches. 
+ isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInf, err := c.clusterCache.GetInformer(ctx, obj) + if err != nil { + return nil, err + } + informers[globalCache] = clusterCacheInf + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil + } + + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformer(ctx, obj) + if err != nil { + return nil, err + } + informers[ns] = informer + } + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { + informers := map[string]Informer{} + + // If the object is clusterscoped, get the informer from clusterCache, + // if not use the namespaced caches. + isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInf, err := c.clusterCache.GetInformerForKind(ctx, gvk) + if err != nil { + return nil, err + } + informers[globalCache] = clusterCacheInf + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil + } + + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformerForKind(ctx, gvk) + if err != nil { + return nil, err + } + informers[ns] = informer + } + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) Start(ctx context.Context) error { + // start global cache + go func() { + err := c.clusterCache.Start(ctx) + if err != nil { + log.Error(err, "cluster scoped cache failed to start") + } + }() + + // start namespaced caches + for ns, cache := range c.namespaceToCache { + go func(ns string, cache Cache) { + err := cache.Start(ctx) + if err != nil { + log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) + } + }(ns, cache) + } 
+ + <-ctx.Done() + return nil +} + +func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { + synced := true + for _, cache := range c.namespaceToCache { + if s := cache.WaitForCacheSync(ctx); !s { + synced = s + } + } + + // check if cluster scoped cache has synced + if !c.clusterCache.WaitForCacheSync(ctx) { + synced = false + } + return synced +} + +func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return nil //nolint:nilerr + } + + if !isNamespaced { + return c.clusterCache.IndexField(ctx, obj, field, extractValue) + } + + for _, cache := range c.namespaceToCache { + if err := cache.IndexField(ctx, obj, field, extractValue); err != nil { + return err + } + } + return nil +} + +func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look into the global cache to fetch the object + return c.clusterCache.Get(ctx, key, obj) + } + + cache, ok := c.namespaceToCache[key.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) + } + return cache.Get(ctx, key, obj) +} + +// List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. +func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look at the global cache to get the objects with the specified GVK + return c.clusterCache.List(ctx, list, opts...) 
+ } + + if listOpts.Namespace != corev1.NamespaceAll { + cache, ok := c.namespaceToCache[listOpts.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace) + } + return cache.List(ctx, list, opts...) + } + + listAccessor, err := apimeta.ListAccessor(list) + if err != nil { + return err + } + + allItems, err := apimeta.ExtractList(list) + if err != nil { + return err + } + + limitSet := listOpts.Limit > 0 + + var resourceVersion string + for _, cache := range c.namespaceToCache { + listObj := list.DeepCopyObject().(client.ObjectList) + err = cache.List(ctx, listObj, &listOpts) + if err != nil { + return err + } + items, err := apimeta.ExtractList(listObj) + if err != nil { + return err + } + accessor, err := apimeta.ListAccessor(listObj) + if err != nil { + return fmt.Errorf("object: %T must be a list type", list) + } + allItems = append(allItems, items...) + // The last list call should have the most correct resource version. + resourceVersion = accessor.GetResourceVersion() + if limitSet { + // decrement Limit by the number of items + // fetched from the current namespace. + listOpts.Limit -= int64(len(items)) + // if a Limit was set and the number of + // items read has reached this set limit, + // then stop reading. + if listOpts.Limit == 0 { + break + } + } + } + listAccessor.SetResourceVersion(resourceVersion) + + return apimeta.SetList(list, allItems) +} + +// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces. +type multiNamespaceInformer struct { + namespaceToInformer map[string]Informer +} + +var _ Informer = &multiNamespaceInformer{} + +// AddEventHandler adds the handler to each namespaced informer. 
+func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) { + for _, informer := range i.namespaceToInformer { + informer.AddEventHandler(handler) + } +} + +// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. +func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) { + for _, informer := range i.namespaceToInformer { + informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) + } +} + +// AddIndexers adds the indexer for each namespaced informer. +func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { + for _, informer := range i.namespaceToInformer { + err := informer.AddIndexers(indexers) + if err != nil { + return err + } + } + return nil +} + +// HasSynced checks if each namespaced informer has synced. +func (i *multiNamespaceInformer) HasSynced() bool { + for _, informer := range i.namespaceToInformer { + if ok := informer.HasSynced(); !ok { + return ok + } + } + return true +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go new file mode 100644 index 000000000..1030013db --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -0,0 +1,166 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certwatcher + +import ( + "context" + "crypto/tls" + "sync" + + "github.com/fsnotify/fsnotify" + "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("certwatcher") + +// CertWatcher watches certificate and key files for changes. When either file +// changes, it reads and parses both and calls an optional callback with the new +// certificate. +type CertWatcher struct { + sync.RWMutex + + currentCert *tls.Certificate + watcher *fsnotify.Watcher + + certPath string + keyPath string +} + +// New returns a new CertWatcher watching the given certificate and key. +func New(certPath, keyPath string) (*CertWatcher, error) { + var err error + + cw := &CertWatcher{ + certPath: certPath, + keyPath: keyPath, + } + + // Initial read of certificate and key. + if err := cw.ReadCertificate(); err != nil { + return nil, err + } + + cw.watcher, err = fsnotify.NewWatcher() + if err != nil { + return nil, err + } + + return cw, nil +} + +// GetCertificate fetches the currently loaded certificate, which may be nil. +func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + cw.RLock() + defer cw.RUnlock() + return cw.currentCert, nil +} + +// Start starts the watch on the certificate and key files. +func (cw *CertWatcher) Start(ctx context.Context) error { + files := []string{cw.certPath, cw.keyPath} + + for _, f := range files { + if err := cw.watcher.Add(f); err != nil { + return err + } + } + + go cw.Watch() + + log.Info("Starting certificate watcher") + + // Block until the context is done. + <-ctx.Done() + + return cw.watcher.Close() +} + +// Watch reads events from the watcher's channel and reacts to changes. +func (cw *CertWatcher) Watch() { + for { + select { + case event, ok := <-cw.watcher.Events: + // Channel is closed. 
+ if !ok { + return + } + + cw.handleEvent(event) + + case err, ok := <-cw.watcher.Errors: + // Channel is closed. + if !ok { + return + } + + log.Error(err, "certificate watch error") + } + } +} + +// ReadCertificate reads the certificate and key files from disk, parses them, +// and updates the current certificate on the watcher. If a callback is set, it +// is invoked with the new certificate. +func (cw *CertWatcher) ReadCertificate() error { + metrics.ReadCertificateTotal.Inc() + cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath) + if err != nil { + metrics.ReadCertificateErrors.Inc() + return err + } + + cw.Lock() + cw.currentCert = &cert + cw.Unlock() + + log.Info("Updated current TLS certificate") + + return nil +} + +func (cw *CertWatcher) handleEvent(event fsnotify.Event) { + // Only care about events which may modify the contents of the file. + if !(isWrite(event) || isRemove(event) || isCreate(event)) { + return + } + + log.V(1).Info("certificate event", "event", event) + + // If the file was removed, re-add the watch. + if isRemove(event) { + if err := cw.watcher.Add(event.Name); err != nil { + log.Error(err, "error re-watching file") + } + } + + if err := cw.ReadCertificate(); err != nil { + log.Error(err, "error re-reading certificate") + } +} + +func isWrite(event fsnotify.Event) bool { + return event.Op&fsnotify.Write == fsnotify.Write +} + +func isCreate(event fsnotify.Event) bool { + return event.Op&fsnotify.Create == fsnotify.Create +} + +func isRemove(event fsnotify.Event) bool { + return event.Op&fsnotify.Remove == fsnotify.Remove +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go new file mode 100644 index 000000000..40c2fc0bf --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package certwatcher is a helper for reloading Certificates from disk to be used +with tls servers. It provides a helper func `GetCertificate` which can be +called from `tls.Config` and passed into your tls.Listener. For a detailed +example server view pkg/webhook/server.go. +*/ +package certwatcher diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go new file mode 100644 index 000000000..05869eff0 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go @@ -0,0 +1,45 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // ReadCertificateTotal is a prometheus counter metrics which holds the total + // number of certificate reads. + ReadCertificateTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "certwatcher_read_certificate_total", + Help: "Total number of certificate reads", + }) + + // ReadCertificateErrors is a prometheus counter metrics which holds the total + // number of errors from certificate read. + ReadCertificateErrors = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "certwatcher_read_certificate_errors_total", + Help: "Total number of certificate read errors", + }) +) + +func init() { + metrics.Registry.MustRegister( + ReadCertificateTotal, + ReadCertificateErrors, + ) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go new file mode 100644 index 000000000..c92b0eaae --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -0,0 +1,196 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apiutil contains utilities for working with raw Kubernetes +// API machinery, such as creating RESTMappers and raw REST clients, +// and extracting the GVK of an object. 
+package apiutil + +import ( + "fmt" + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/discovery" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +var ( + protobufScheme = runtime.NewScheme() + protobufSchemeLock sync.RWMutex +) + +func init() { + // Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers. + // For custom resources, CRDs can not support Protocol Buffers but Aggregated API can. + // See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + if err := clientgoscheme.AddToScheme(protobufScheme); err != nil { + panic(err) + } +} + +// AddToProtobufScheme add the given SchemeBuilder into protobufScheme, which should +// be additional types that do support protobuf. +func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { + protobufSchemeLock.Lock() + defer protobufSchemeLock.Unlock() + return addToScheme(protobufScheme) +} + +// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery +// information fetched by a new client with the given config. +func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { + // Get a mapper + dc, err := discovery.NewDiscoveryClientForConfig(c) + if err != nil { + return nil, err + } + gr, err := restmapper.GetAPIGroupResources(dc) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(gr), nil +} + +// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. 
+func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // TODO(directxman12): do we want to generalize this to arbitrary container types? + // I think we'd need a generalized form of scheme or something. It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + + gvks, isUnversioned, err := scheme.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + if isUnversioned { + return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj) + } + + if len(gvks) < 1 { + return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) + } + if len(gvks) > 1 { + // this should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine + return schema.GroupVersionKind{}, fmt.Errorf( + "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + } + return gvks[0], nil +} + +// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated +// with the given GroupVersionKind. 
The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { + return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) +} + +// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory +// in order to avoid clearing the GVK from the decoded object. +// +// See https://github.com/kubernetes/kubernetes/issues/80609. +type serializerWithDecodedGVK struct { + serializer.WithoutConversionCodecFactory +} + +// DecoderToVersion returns an decoder that does not do conversion. +func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return serializer +} + +// createRestConfig copies the base config and updates needed fields for a new rest config. +func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { + gv := gvk.GroupVersion() + + cfg := rest.CopyConfig(baseConfig) + cfg.GroupVersion = &gv + if gvk.Group == "" { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + if cfg.UserAgent == "" { + cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true. + if cfg.ContentType == "" && !isUnstructured { + protobufSchemeLock.RLock() + if protobufScheme.Recognizes(gvk) { + cfg.ContentType = runtime.ContentTypeProtobuf + } + protobufSchemeLock.RUnlock() + } + + if isUnstructured { + // If the object is unstructured, we need to preserve the GVK information. + // Use our own custom serializer. 
+ cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } else { + cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } + + return cfg +} + +type serializerWithTargetZeroingDecode struct { + runtime.NegotiatedSerializer +} + +func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder { + return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)} +} + +type targetZeroingDecoder struct { + upstream runtime.Decoder +} + +func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + zero(into) + return t.upstream.Decode(data, defaults, into) +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go new file mode 100644 index 000000000..e6cc51c6e --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -0,0 +1,294 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "errors" + "sync" + "sync/atomic" + + "golang.org/x/time/rate" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// dynamicRESTMapper is a RESTMapper that dynamically discovers resource +// types at runtime. +type dynamicRESTMapper struct { + mu sync.RWMutex // protects the following fields + staticMapper meta.RESTMapper + limiter *rate.Limiter + newMapper func() (meta.RESTMapper, error) + + lazy bool + // Used for lazy init. + inited uint32 + initMtx sync.Mutex +} + +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. +type DynamicRESTMapperOption func(*dynamicRESTMapper) error + +// WithLimiter sets the RESTMapper's underlying limiter to lim. +func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.limiter = lim + return nil + } +} + +// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings +// until an API call is made. +var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { + drm.lazy = true + return nil +} + +// WithCustomMapper supports setting a custom RESTMapper refresher instead of +// the default method, which uses a discovery client. +// +// This exists mainly for testing, but can be useful if you need tighter control +// over how discovery is performed, which discovery endpoints are queried, etc. +func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.newMapper = newMapper + return nil + } +} + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. opts +// configure the RESTMapper. 
+func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { + client, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + drm := &dynamicRESTMapper{ + limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), + newMapper: func() (meta.RESTMapper, error) { + groupResources, err := restmapper.GetAPIGroupResources(client) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(groupResources), nil + }, + } + for _, opt := range opts { + if err = opt(drm); err != nil { + return nil, err + } + } + if !drm.lazy { + if err := drm.setStaticMapper(); err != nil { + return nil, err + } + } + return drm, nil +} + +var ( + // defaultRefilRate is the default rate at which potential calls are + // added back to the "bucket" of allowed calls. + defaultRefillRate = 5 + // defaultLimitSize is the default starting/max number of potential calls + // per second. Once a call is used, it's added back to the bucket at a rate + // of defaultRefillRate per second. + defaultLimitSize = 5 +) + +// setStaticMapper sets drm's staticMapper by querying its client, regardless +// of reload backoff. +func (drm *dynamicRESTMapper) setStaticMapper() error { + newMapper, err := drm.newMapper() + if err != nil { + return err + } + drm.staticMapper = newMapper + return nil +} + +// init initializes drm only once if drm is lazy. +func (drm *dynamicRESTMapper) init() (err error) { + // skip init if drm is not lazy or has initialized + if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 { + return nil + } + + drm.initMtx.Lock() + defer drm.initMtx.Unlock() + if drm.inited == 0 { + if err = drm.setStaticMapper(); err == nil { + atomic.StoreUint32(&drm.inited, 1) + } + } + return err +} + +// checkAndReload attempts to call the given callback, which is assumed to be dependent +// on the data in the restmapper. 
+// +// If the callback returns an error that matches the given error, it will attempt to reload +// the RESTMapper's data and re-call the callback once that's occurred. +// If the callback returns any other error, the function will return immediately regardless. +// +// It will take care of ensuring that reloads are rate-limited and that extraneous calls +// aren't made. If a reload would exceed the limiters rate, it returns the error return by +// the callback. +// It's thread-safe, and worries about thread-safety for the callback (so the callback does +// not need to attempt to lock the restmapper). +func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error { + // first, check the common path -- data is fresh enough + // (use an IIFE for the lock's defer) + err := func() error { + drm.mu.RLock() + defer drm.mu.RUnlock() + + return checkNeedsReload() + }() + + // NB(directxman12): `Is` and `As` have a confusing relationship -- + // `Is` is like `== or does this implement .Is`, whereas `As` says + // `can I type-assert into` + needsReload := errors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // if the data wasn't fresh, we'll need to try and update it, so grab the lock... + drm.mu.Lock() + defer drm.mu.Unlock() + + // ... and double-check that we didn't reload in the meantime + err = checkNeedsReload() + needsReload = errors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // we're still stale, so grab a rate-limit token if we can... + if !drm.limiter.Allow() { + // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) + // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError + return err + } + + // ...reload... 
+ if err := drm.setStaticMapper(); err != nil { + return err + } + + // ...and return the results of the closure regardless + return checkNeedsReload() +} + +// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. + +func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionKind{}, err + } + var gvk schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvk, err = drm.staticMapper.KindFor(resource) + return err + }) + return gvk, err +} + +func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvks []schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvks, err = drm.staticMapper.KindsFor(resource) + return err + }) + return gvks, err +} + +func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionResource{}, err + } + + var gvr schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvr, err = drm.staticMapper.ResourceFor(input) + return err + }) + return gvr, err +} + +func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvrs []schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvrs, err = drm.staticMapper.ResourcesFor(input) + return err + }) + return gvrs, err +} + +func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if err := drm.init(); err != nil { 
+ return nil, err + } + var mapping *meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mapping, err = drm.staticMapper.RESTMapping(gk, versions...) + return err + }) + return mapping, err +} + +func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mappings []*meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mappings, err = drm.staticMapper.RESTMappings(gk, versions...) + return err + }) + return mappings, err +} + +func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { + if err := drm.init(); err != nil { + return "", err + } + var singular string + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + singular, err = drm.staticMapper.ResourceSingularizer(resource) + return err + }) + return singular, err +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go new file mode 100644 index 000000000..bbe36c467 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -0,0 +1,328 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// WarningHandlerOptions are options for configuring a +// warning handler for the client which is responsible +// for surfacing API Server warnings. +type WarningHandlerOptions struct { + // SuppressWarnings decides if the warnings from the + // API server are suppressed or surfaced in the client. + SuppressWarnings bool + // AllowDuplicateLogs does not deduplicate the to-be + // logged surfaced warnings messages. See + // log.WarningHandlerOptions for considerations + // regarding deuplication + AllowDuplicateLogs bool +} + +// Options are creation options for a Client. +type Options struct { + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Opts is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. + Opts WarningHandlerOptions +} + +// New returns a new Client using the provided config and Options. +// The returned client reads *and* writes directly from the server +// (it doesn't use object caches). It understands how to work with +// normal types (both custom resources and aggregated/built-in resources), +// as well as unstructured types. +// +// In the case of normal types, the scheme will be used to look up the +// corresponding group, version, and kind for the given type. 
In the +// case of unstructured types, the group, version, and kind will be extracted +// from the corresponding fields on the object. +func New(config *rest.Config, options Options) (Client, error) { + return newClient(config, options) +} + +func newClient(config *rest.Config, options Options) (*client, error) { + if config == nil { + return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") + } + + if !options.Opts.SuppressWarnings { + // surface warnings + logger := log.Log.WithName("KubeAPIWarningLogger") + // Set a WarningHandler, the default WarningHandler + // is log.KubeAPIWarningLogger with deduplication enabled. + // See log.KubeAPIWarningLoggerOptions for considerations + // regarding deduplication. + rest.SetDefaultWarningHandler( + log.NewKubeAPIWarningLogger( + logger, + log.KubeAPIWarningLoggerOptions{ + Deduplicate: !options.Opts.AllowDuplicateLogs, + }, + ), + ) + } + + // Init a scheme if none provided + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + // Init a Mapper if none provided + if options.Mapper == nil { + var err error + options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + if err != nil { + return nil, err + } + } + + clientcache := &clientCache{ + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), + + structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + } + + rawMetaClient, err := metadata.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) + } + + c := &client{ + typedClient: typedClient{ + cache: clientcache, + paramCodec: runtime.NewParameterCodec(options.Scheme), + }, + unstructuredClient: unstructuredClient{ + cache: clientcache, + paramCodec: noConversionParamCodec{}, + }, + metadataClient: metadataClient{ + client: 
rawMetaClient, + restMapper: options.Mapper, + }, + scheme: options.Scheme, + mapper: options.Mapper, + } + + return c, nil +} + +var _ Client = &client{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type client struct { + typedClient typedClient + unstructuredClient unstructuredClient + metadataClient metadataClient + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. +func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { + if gvk != schema.EmptyObjectKind.GroupVersionKind() { + if v, ok := obj.(schema.ObjectKind); ok { + v.SetGroupVersionKind(gvk) + } + } +} + +// Scheme returns the scheme this client is using. +func (c *client) Scheme() *runtime.Scheme { + return c.scheme +} + +// RESTMapper returns the scheme this client is using. +func (c *client) RESTMapper() meta.RESTMapper { + return c.mapper +} + +// Create implements client.Client. +func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Create(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot create using only metadata") + default: + return c.typedClient.Create(ctx, obj, opts...) + } +} + +// Update implements client.Client. +func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Update(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") + default: + return c.typedClient.Update(ctx, obj, opts...) 
+ } +} + +// Delete implements client.Client. +func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Delete(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Delete(ctx, obj, opts...) + default: + return c.typedClient.Delete(ctx, obj, opts...) + } +} + +// DeleteAllOf implements client.Client. +func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.DeleteAllOf(ctx, obj, opts...) + default: + return c.typedClient.DeleteAllOf(ctx, obj, opts...) + } +} + +// Patch implements client.Client. +func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Patch(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Patch(ctx, obj, patch, opts...) + default: + return c.typedClient.Patch(ctx, obj, patch, opts...) + } +} + +// Get implements client.Client. +func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Get(ctx, key, obj) + case *metav1.PartialObjectMetadata: + // Metadata only object should always preserve the GVK coming in from the caller. + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + return c.metadataClient.Get(ctx, key, obj) + default: + return c.typedClient.Get(ctx, key, obj) + } +} + +// List implements client.Client. 
+func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + switch x := obj.(type) { + case *unstructured.UnstructuredList: + return c.unstructuredClient.List(ctx, obj, opts...) + case *metav1.PartialObjectMetadataList: + // Metadata only object should always preserve the GVK. + gvk := obj.GetObjectKind().GroupVersionKind() + defer c.resetGroupVersionKind(obj, gvk) + + // Call the list client. + if err := c.metadataClient.List(ctx, obj, opts...); err != nil { + return err + } + + // Restore the GVK for each item in the list. + itemGVK := schema.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + Kind: strings.TrimSuffix(gvk.Kind, "List"), + } + for i := range x.Items { + item := &x.Items[i] + item.SetGroupVersionKind(itemGVK) + } + + return nil + default: + return c.typedClient.List(ctx, obj, opts...) + } +} + +// Status implements client.StatusClient. +func (c *client) Status() StatusWriter { + return &statusWriter{client: c} +} + +// statusWriter is client.StatusWriter that writes status subresource. +type statusWriter struct { + client *client +} + +// ensure statusWriter implements client.StatusWriter. +var _ StatusWriter = &statusWriter{} + +// Update implements client.StatusWriter. +func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") + default: + return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) + } +} + +// Patch implements client.Client. 
+func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...) + default: + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) + } +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go new file mode 100644 index 000000000..857a0b38a --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -0,0 +1,150 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// clientCache creates and caches rest clients and metadata for Kubernetes types. 
+type clientCache struct { + // config is the rest.Config to talk to an apiserver + config *rest.Config + + // scheme maps go structs to GroupVersionKinds + scheme *runtime.Scheme + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // codecs are used to create a REST client for a gvk + codecs serializer.CodecFactory + + // structuredResourceByType caches structured type metadata + structuredResourceByType map[schema.GroupVersionKind]*resourceMeta + // unstructuredResourceByType caches unstructured type metadata + unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta + mu sync.RWMutex +} + +// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { + if strings.HasSuffix(gvk.Kind, "List") && isList { + // if this was a list, treat it as a request for the item's resource + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + if err != nil { + return nil, err + } + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil +} + +// getResource returns the resource meta information for the given type of object. +// If the object is a list, the resource represents the item's type instead. 
+func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + isUnstructured = isUnstructured || isUnstructuredList + + // It's better to do creation work twice than to not let multiple + // people make requests at once + c.mu.RLock() + resourceByType := c.structuredResourceByType + if isUnstructured { + resourceByType = c.unstructuredResourceByType + } + r, known := resourceByType[gvk] + c.mu.RUnlock() + + if known { + return r, nil + } + + // Initialize a new Client + c.mu.Lock() + defer c.mu.Unlock() + r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured) + if err != nil { + return nil, err + } + resourceByType[gvk] = r + return r, err +} + +// getObjMeta returns objMeta containing both type and object metadata and state. +func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { + r, err := c.getResource(obj) + if err != nil { + return nil, err + } + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + return &objMeta{resourceMeta: r, Object: m}, err +} + +// resourceMeta caches state for a Kubernetes type. +type resourceMeta struct { + // client is the rest client used to talk to the apiserver + rest.Interface + // gvk is the GroupVersionKind of the resourceMeta + gvk schema.GroupVersionKind + // mapping is the rest mapping + mapping *meta.RESTMapping +} + +// isNamespaced returns true if the type is namespaced. +func (r *resourceMeta) isNamespaced() bool { + return r.mapping.Scope.Name() != meta.RESTScopeNameRoot +} + +// resource returns the resource name of the type. +func (r *resourceMeta) resource() string { + return r.mapping.Resource.Resource +} + +// objMeta stores type and object information about a Kubernetes type. 
+type objMeta struct { + // resourceMeta contains type information for the object + *resourceMeta + + // Object contains meta data for the object instance + metav1.Object +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go new file mode 100644 index 000000000..9c2923106 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "errors" + "net/url" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.ParameterCodec = noConversionParamCodec{} + +// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings. +// it's useful in scenarios with the unstructured client and arbitrary resources. 
+type noConversionParamCodec struct{} + +func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) { + return queryparams.Convert(obj) +} + +func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error { + return errors.New("DecodeParameters not implemented on noConversionParamCodec") +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go new file mode 100644 index 000000000..fd2772412 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var ( + kubeconfig string + log = logf.RuntimeLog.WithName("client").WithName("config") +) + +func init() { + // TODO: Fix this to allow double vendoring this library but still register flags on behalf of users + flag.StringVar(&kubeconfig, "kubeconfig", "", + "Paths to a kubeconfig. Only required if out-of-cluster.") +} + +// GetConfig creates a *rest.Config for talking to a Kubernetes API server. 
+// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. +func GetConfig() (*rest.Config, error) { + return GetConfigWithContext("") +} + +// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. +func GetConfigWithContext(context string) (*rest.Config, error) { + cfg, err := loadConfig(context) + if err != nil { + return nil, err + } + + if cfg.QPS == 0.0 { + cfg.QPS = 20.0 + cfg.Burst = 30.0 + } + + return cfg, nil +} + +// loadInClusterConfig is a function used to load the in-cluster +// Kubernetes client config. This variable makes is possible to +// test the precedence of loading the config. +var loadInClusterConfig = rest.InClusterConfig + +// loadConfig loads a REST Config as per the rules specified in GetConfig. 
+func loadConfig(context string) (*rest.Config, error) { + // If a flag is specified with the config location, use that + if len(kubeconfig) > 0 { + return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) + } + + // If the recommended kubeconfig env variable is not specified, + // try the in-cluster config. + kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if len(kubeconfigPath) == 0 { + if c, err := loadInClusterConfig(); err == nil { + return c, nil + } + } + + // If the recommended kubeconfig env variable is set, or there + // is no in-cluster config, try the default recommended locations. + // + // NOTE: For default config file locations, upstream only checks + // $HOME for the user's home directory, but we can also try + // os/user.HomeDir when $HOME is unset. + // + // TODO(jlanford): could this be done upstream? + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + if _, ok := os.LookupEnv("HOME"); !ok { + u, err := user.Current() + if err != nil { + return nil, fmt.Errorf("could not get current user: %w", err) + } + loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName)) + } + + return loadConfigWithContext("", loadingRules, context) +} + +func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loader, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: apiServerURL, + }, + CurrentContext: context, + }).ClientConfig() +} + +// GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. 
+// +// Will log an error and exit if there is an error creating the rest.Config. +func GetConfigOrDie() *rest.Config { + config, err := GetConfig() + if err != nil { + log.Error(err, "unable to get kubeconfig") + os.Exit(1) + } + return config +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go new file mode 100644 index 000000000..796c9cf59 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config contains libraries for initializing REST configs for talking to the Kubernetes API +package config diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go new file mode 100644 index 000000000..2965e5fa9 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package client contains functionality for interacting with Kubernetes API +// servers. +// +// Clients +// +// Clients are split into two interfaces -- Readers and Writers. Readers +// get and list, while writers create, update, and delete. +// +// The New function can be used to create a new client that talks directly +// to the API server. +// +// A common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the DelegatingClient type, which can +// be used to have a client whose Reader is different from the Writer. +// +// Options +// +// Many client operations in Kubernetes support options. These options are +// represented as variadic arguments at the end of a given method call. +// For instance, to use a label selector on list, you can call +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// Indexing +// +// Indexes may be added to caches using a FieldIndexer. This allows you to easily +// and efficiently look up objects with certain properties. You can then make +// use of the index by specifying a field selector on calls to List on the Reader +// corresponding to the given Cache. +// +// For instance, a Secret controller might have an index on the +// `.spec.volumes.secret.secretName` field in Pod objects, so that it could +// easily look up all pods that reference a given secret. 
+package client diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go new file mode 100644 index 000000000..ea25ea253 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDryRunClient wraps an existing client and enforces DryRun mode +// on all mutating api calls. +func NewDryRunClient(c Client) Client { + return &dryRunClient{client: c} +} + +var _ Client = &dryRunClient{} + +// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode. +type dryRunClient struct { + client Client +} + +// Scheme returns the scheme this client is using. +func (c *dryRunClient) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +// RESTMapper returns the rest mapper this client is using. +func (c *dryRunClient) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +// Create implements client.Client. +func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append(opts, DryRunAll)...) +} + +// Update implements client.Client. 
+func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Delete implements client.Client. +func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) +} + +// DeleteAllOf implements client.Client. +func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.Client. +func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} + +// Get implements client.Client. +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + return c.client.Get(ctx, key, obj) +} + +// List implements client.Client. +func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + return c.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (c *dryRunClient) Status() StatusWriter { + return &dryRunStatusWriter{client: c.client.Status()} +} + +// ensure dryRunStatusWriter implements client.StatusWriter. +var _ StatusWriter = &dryRunStatusWriter{} + +// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode +// enforced. +type dryRunStatusWriter struct { + client StatusWriter +} + +// Update implements client.StatusWriter. +func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.StatusWriter. +func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) 
+} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go new file mode 100644 index 000000000..58c2ece15 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -0,0 +1,145 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// ObjectKey identifies a Kubernetes Object. +type ObjectKey = types.NamespacedName + +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object. +func ObjectKeyFromObject(obj Object) ObjectKey { + return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} +} + +// Patch is a patch that can be applied to a Kubernetes object. +type Patch interface { + // Type is the PatchType of the patch. + Type() types.PatchType + // Data is the raw data representing the patch. + Data(obj Object) ([]byte, error) +} + +// TODO(directxman12): is there a sane way to deal with get/delete options? + +// Reader knows how to read and list Kubernetes objects. +type Reader interface { + // Get retrieves an obj for the given object key from the Kubernetes Cluster. 
+ // obj must be a struct pointer so that obj can be updated with the response + // returned by the Server. + Get(ctx context.Context, key ObjectKey, obj Object) error + + // List retrieves list of objects for a given namespace and list options. On a + // successful call, Items field in the list will be populated with the + // result returned from the server. + List(ctx context.Context, list ObjectList, opts ...ListOption) error +} + +// Writer knows how to create, delete, and update Kubernetes objects. +type Writer interface { + // Create saves the object obj in the Kubernetes cluster. + Create(ctx context.Context, obj Object, opts ...CreateOption) error + + // Delete deletes the given obj from Kubernetes cluster. + Delete(ctx context.Context, obj Object, opts ...DeleteOption) error + + // Update updates the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Update(ctx context.Context, obj Object, opts ...UpdateOption) error + + // Patch patches the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error + + // DeleteAllOf deletes all objects of the given type matching the given options. + DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error +} + +// StatusClient knows how to create a client which can update status subresource +// for kubernetes objects. +type StatusClient interface { + Status() StatusWriter +} + +// StatusWriter knows how to update status subresource of a Kubernetes object. +type StatusWriter interface { + // Update updates the fields corresponding to the status subresource for the + // given obj. obj must be a struct pointer so that obj can be updated + // with the content returned by the Server. 
+ Update(ctx context.Context, obj Object, opts ...UpdateOption) error + + // Patch patches the given object's subresource. obj must be a struct + // pointer so that obj can be updated with the content returned by the + // Server. + Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error +} + +// Client knows how to perform CRUD operations on Kubernetes objects. +type Client interface { + Reader + Writer + StatusClient + + // Scheme returns the scheme this client is using. + Scheme() *runtime.Scheme + // RESTMapper returns the rest this client is using. + RESTMapper() meta.RESTMapper +} + +// WithWatch supports Watch on top of the CRUD operations supported by +// the normal Client. Its intended use-case are CLI apps that need to wait for +// events. +type WithWatch interface { + Client + Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) +} + +// IndexerFunc knows how to take an object and turn it into a series +// of non-namespaced keys. Namespaced objects are automatically given +// namespaced and non-spaced variants, so keys do not need to include namespace. +type IndexerFunc func(Object) []string + +// FieldIndexer knows how to index over a particular "field" such that it +// can later be used by a field selector. +type FieldIndexer interface { + // IndexFields adds an index with the given field name on the given object type + // by using the given function to extract the value for that field. If you want + // compatibility with the Kubernetes API server, only return one key, and only use + // fields that the API server supports. Otherwise, you can return multiple keys, + // and "equality" in the field selector means that at least one key matches the value. + // The FieldIndexer will automatically take care of indexing over namespace + // and supporting efficient all-namespace queries. 
+ IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error +} + +// IgnoreNotFound returns nil on NotFound errors. +// All other values that are not NotFound errors or nil are returned unmodified. +func IgnoreNotFound(err error) error { + if apierrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go new file mode 100644 index 000000000..db495ca46 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -0,0 +1,193 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/metadata" +) + +// TODO(directxman12): we could rewrite this on top of the low-level REST +// client to avoid the extra shallow copy at the end, but I'm not sure it's +// worth it -- the metadata client deals with falling back to loading the whole +// object on older API servers, etc, and we'd have to reproduce that. + +// metadataClient is a client that reads & writes metadata-only requests to/from the API server. 
+type metadataClient struct { + client metadata.Interface + restMapper meta.RESTMapper +} + +func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) { + mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return mc.client.Resource(mapping.Resource), nil + } + return mc.client.Resource(mapping.Resource).Namespace(ns), nil +} + +// Delete implements client.Client. +func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) +} + +// DeleteAllOf implements client.Client. +func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace) + if err != nil { + return err + } + + return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) +} + +// Patch implements client.Client. 
+func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// Get implements client.Client. +func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + + resInt, err := mc.getResourceInterface(gvk, key.Namespace) + if err != nil { + return err + } + + res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{}) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// List implements client.Client. 
+func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return err + } + + res, err := resInt.List(ctx, *listOpts.AsListOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status") + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go new file mode 100644 index 000000000..45a694543 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -0,0 +1,213 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +// NewNamespacedClient wraps an existing client enforcing the namespace value. +// All functions using this client will have the same namespace declared here. +func NewNamespacedClient(c Client, ns string) Client { + return &namespacedClient{ + client: c, + namespace: ns, + } +} + +var _ Client = &namespacedClient{} + +// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value. +type namespacedClient struct { + namespace string + client Client +} + +// Scheme returns the scheme this client is using. +func (n *namespacedClient) Scheme() *runtime.Scheme { + return n.client.Scheme() +} + +// RESTMapper returns the scheme this client is using. +func (n *namespacedClient) RESTMapper() meta.RESTMapper { + return n.client.RESTMapper() +} + +// Create implements clinet.Client. 
+func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Create(ctx, obj, opts...) +} + +// Update implements client.Client. +func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Update(ctx, obj, opts...) +} + +// Delete implements client.Client. 
+func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Delete(ctx, obj, opts...) +} + +// DeleteAllOf implements client.Client. +func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + if isNamespaceScoped { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.DeleteAllOf(ctx, obj, opts...) +} + +// Patch implements client.Client. +func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Patch(ctx, obj, patch, opts...) +} + +// Get implements client.Client. 
+func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + if isNamespaceScoped { + if key.Namespace != "" && key.Namespace != n.namespace { + return fmt.Errorf("namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) + } + key.Namespace = n.namespace + } + return n.client.Get(ctx, key, obj) +} + +// List implements client.Client. +func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if n.namespace != "" { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (n *namespacedClient) Status() StatusWriter { + return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} +} + +// ensure namespacedClientStatusWriter implements client.StatusWriter. +var _ StatusWriter = &namespacedClientStatusWriter{} + +type namespacedClientStatusWriter struct { + StatusClient StatusWriter + namespace string + namespacedclient Client +} + +// Update implements client.StatusWriter. 
+func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Update(ctx, obj, opts...) +} + +// Patch implements client.StatusWriter. +func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Patch(ctx, obj, patch, opts...) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go new file mode 100644 index 000000000..31e334d6c --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Object is a Kubernetes object, allows functions to work indistinctly with +// any resource that implements both Object interfaces. +// +// Semantically, these are objects which are both serializable (runtime.Object) +// and identifiable (metav1.Object) -- think any object which you could write +// as YAML or JSON, and then `kubectl create`. +// +// Code-wise, this means that any object which embeds both ObjectMeta (which +// provides metav1.Object) and TypeMeta (which provides half of runtime.Object) +// and has a `DeepCopyObject` implementation (the other half of runtime.Object) +// will implement this by default. +// +// For example, nearly all the built-in types are Objects, as well as all +// KubeBuilder-generated CRDs (unless you do something real funky to them). +// +// By and large, most things that implement runtime.Object also implement +// Object -- it's very rare to have *just* a runtime.Object implementation (the +// cases tend to be funky built-in types like Webhook payloads that don't have +// a `metadata` field). +// +// Notice that XYZList types are distinct: they implement ObjectList instead. 
+type Object interface { + metav1.Object + runtime.Object +} + +// ObjectList is a Kubernetes object list, allows functions to work +// indistinctly with any resource that implements both runtime.Object and +// metav1.ListInterface interfaces. +// +// Semantically, this is any object which may be serialized (ObjectMeta), and +// is a kubernetes list wrapper (has items, pagination fields, etc) -- think +// the wrapper used in a response from a `kubectl list --output yaml` call. +// +// Code-wise, this means that any object which embedds both ListMeta (which +// provides metav1.ListInterface) and TypeMeta (which provides half of +// runtime.Object) and has a `DeepCopyObject` implementation (the other half of +// runtime.Object) will implement this by default. +// +// For example, nearly all the built-in XYZList types are ObjectLists, as well +// as the XYZList types for all KubeBuilder-generated CRDs (unless you do +// something real funky to them). +// +// By and large, most things that are XYZList and implement runtime.Object also +// implement ObjectList -- it's very rare to have *just* a runtime.Object +// implementation (the cases tend to be funky built-in types like Webhook +// payloads that don't have a `metadata` field). +// +// This is similar to Object, which is almost always implemented by the items +// in the list themselves. +type ObjectList interface { + metav1.ListInterface + runtime.Object +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go new file mode 100644 index 000000000..7990f56ab --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -0,0 +1,697 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" +) + +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. +type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// ListOption is some configuration that modifies options for a list request. +type ListOption interface { + // ApplyToList applies this configuration to the given list options. + ApplyToList(*ListOptions) +} + +// UpdateOption is some configuration that modifies options for a update request. +type UpdateOption interface { + // ApplyToUpdate applies this configuration to the given update options. + ApplyToUpdate(*UpdateOptions) +} + +// PatchOption is some configuration that modifies options for a patch request. +type PatchOption interface { + // ApplyToPatch applies this configuration to the given patch options. + ApplyToPatch(*PatchOptions) +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. +type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. 
+ ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// }}} + +// {{{ Multi-Type Options + +// DryRunAll sets the "dry run" option to "all", executing all +// validation, etc without persisting the change to storage. +var DryRunAll = dryRunAll{} + +type dryRunAll struct{} + +// ApplyToCreate applies this configuration to the given create options. +func (dryRunAll) ApplyToCreate(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToUpdate applies this configuration to the given update options. +func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given patch options. +func (dryRunAll) ApplyToPatch(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given delete options. +func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. +type FieldOwner string + +// ApplyToPatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// }}} + +// {{{ Create Options + +// CreateOptions contains options for create requests. It's generally a subset +// of metav1.CreateOptions. +type CreateOptions struct { + // When present, indicates that modifications should not be + // persisted. 
An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw CreateOptions, as passed to the API server. + Raw *metav1.CreateOptions +} + +// AsCreateOptions returns these options as a metav1.CreateOptions. +// This may mutate the Raw field. +func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { + if o == nil { + return &metav1.CreateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.CreateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). +func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) + } + return o +} + +// ApplyToCreate implements CreateOption. +func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { + if o.DryRun != nil { + co.DryRun = o.DryRun + } + if o.FieldManager != "" { + co.FieldManager = o.FieldManager + } + if o.Raw != nil { + co.Raw = o.Raw + } +} + +var _ CreateOption = &CreateOptions{} + +// }}} + +// {{{ Delete Options + +// DeleteOptions contains options for delete requests. It's generally a subset +// of metav1.DeleteOptions. +type DeleteOptions struct { + // GracePeriodSeconds is the duration in seconds before the object should be + // deleted. Value must be non-negative integer. The value zero indicates + // delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + GracePeriodSeconds *int64 + + // Preconditions must be fulfilled before a deletion is carried out. 
If not + // possible, a 409 Conflict status will be returned. + Preconditions *metav1.Preconditions + + // PropagationPolicy determined whether and how garbage collection will be + // performed. Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + PropagationPolicy *metav1.DeletionPropagation + + // Raw represents raw DeleteOptions, as passed to the API server. + Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string +} + +// AsDeleteOptions returns these options as a metav1.DeleteOptions. +// This may mutate the Raw field. +func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { + if o == nil { + return &metav1.DeleteOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.DeleteOptions{} + } + + o.Raw.GracePeriodSeconds = o.GracePeriodSeconds + o.Raw.Preconditions = o.Preconditions + o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) + } + return o +} + +var _ DeleteOption = &DeleteOptions{} + +// ApplyToDelete implements DeleteOption. 
+func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { + if o.GracePeriodSeconds != nil { + do.GracePeriodSeconds = o.GracePeriodSeconds + } + if o.Preconditions != nil { + do.Preconditions = o.Preconditions + } + if o.PropagationPolicy != nil { + do.PropagationPolicy = o.PropagationPolicy + } + if o.Raw != nil { + do.Raw = o.Raw + } + if o.DryRun != nil { + do.DryRun = o.DryRun + } +} + +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. +type GracePeriodSeconds int64 + +// ApplyToDelete applies this configuration to the given delete options. +func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions metav1.Preconditions + +// ApplyToDelete applies this configuration to the given delete options. +func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// PropagationPolicy determined whether and how garbage collection will be +// performed. Either this field or OrphanDependents may be set, but not both. +// The default policy is decided by the existing finalizer set in the +// metadata.finalizers and the resource-specific default policy. 
+// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - +// allow the garbage collector to delete the dependents in the background; +// 'Foreground' - a cascading policy that deletes all dependents in the +// foreground. +type PropagationPolicy metav1.DeletionPropagation + +// ApplyToDelete applies the given delete options on these options. +// It will propagate to the dependents of the object to let the garbage collector handle it. +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ List Options + +// ListOptions contains options for limiting or filtering results. +// It's generally a subset of metav1.ListOptions, with support for +// pre-parsed selectors (since generally, selectors will be executed +// against the cache). +type ListOptions struct { + // LabelSelector filters results by label. Use labels.Parse() to + // set from raw string form. + LabelSelector labels.Selector + // FieldSelector filters results by a particular field. In order + // to use this with cache-based implementations, restrict usage to + // a single field-value pair that's been added to the indexers. + FieldSelector fields.Selector + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string + + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. 
+ Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. This field is not supported if watch is true in the Raw ListOptions. + Continue string + + // Raw represents raw ListOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface, + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. + Raw *metav1.ListOptions +} + +var _ ListOption = &ListOptions{} + +// ApplyToList implements ListOption for ListOptions. +func (o *ListOptions) ApplyToList(lo *ListOptions) { + if o.LabelSelector != nil { + lo.LabelSelector = o.LabelSelector + } + if o.FieldSelector != nil { + lo.FieldSelector = o.FieldSelector + } + if o.Namespace != "" { + lo.Namespace = o.Namespace + } + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue + } +} + +// AsListOptions returns these options as a flattened metav1.ListOptions. +// This may mutate the Raw field. +func (o *ListOptions) AsListOptions() *metav1.ListOptions { + if o == nil { + return &metav1.ListOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.ListOptions{} + } + if o.LabelSelector != nil { + o.Raw.LabelSelector = o.LabelSelector.String() + } + if o.FieldSelector != nil { + o.Raw.FieldSelector = o.FieldSelector.String() + } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } + return o.Raw +} + +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). 
+func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) + } + return o +} + +// MatchingLabels filters the list/delete operation on the given set of labels. +type MatchingLabels map[string]string + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? + sel := labels.SelectorFromValidatedSet(map[string]string(m)) + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// HasLabels filters the list/delete operation checking if the set of labels exists +// without checking their values. +type HasLabels []string + +// ApplyToList applies this configuration to the given list options. +func (m HasLabels) ApplyToList(opts *ListOptions) { + sel := labels.NewSelector() + for _, label := range m { + r, err := labels.NewRequirement(label, selection.Exists, nil) + if err == nil { + sel = sel.Add(*r) + } + } + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingLabelsSelector filters the list/delete operation on the given label +// selector (or index in the case of cached lists). A struct is used because +// labels.Selector is an interface, which cannot be aliased. +type MatchingLabelsSelector struct { + labels.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) { + opts.LabelSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. 
+func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFields filters the list/delete operation on the given field Set +// (or index in the case of cached lists). +type MatchingFields fields.Set + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFields) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid re-serializing this? + sel := fields.Set(m).AsSelector() + opts.FieldSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFieldsSelector filters the list/delete operation on the given field +// selector (or index in the case of cached lists). A struct is used because +// fields.Selector is an interface, which cannot be aliased. +type MatchingFieldsSelector struct { + fields.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) { + opts.FieldSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// InNamespace restricts the list/delete operation to the given namespace. +type InNamespace string + +// ApplyToList applies this configuration to the given list options. +func (n InNamespace) ApplyToList(opts *ListOptions) { + opts.Namespace = string(n) +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + n.ApplyToList(&opts.ListOptions) +} + +// Limit specifies the maximum number of results to return from the server. 
+// Limit does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Limit int64 + +// ApplyToList applies this configuration to the given an list options. +func (l Limit) ApplyToList(opts *ListOptions) { + opts.Limit = int64(l) +} + +// Continue sets a continuation token to retrieve chunks of results when using limit. +// Continue does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Continue string + +// ApplyToList applies this configuration to the given an List options. +func (c Continue) ApplyToList(opts *ListOptions) { + opts.Continue = string(c) +} + +// }}} + +// {{{ Update Options + +// UpdateOptions contains options for create requests. It's generally a subset +// of metav1.UpdateOptions. +type UpdateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw UpdateOptions, as passed to the API server. + Raw *metav1.UpdateOptions +} + +// AsUpdateOptions returns these options as a metav1.UpdateOptions. +// This may mutate the Raw field. +func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { + if o == nil { + return &metav1.UpdateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.UpdateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given update options on these options, +// and then returns itself (for convenient chaining). 
+func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { + for _, opt := range opts { + opt.ApplyToUpdate(o) + } + return o +} + +var _ UpdateOption = &UpdateOptions{} + +// ApplyToUpdate implements UpdateOption. +func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { + if o.DryRun != nil { + uo.DryRun = o.DryRun + } + if o.FieldManager != "" { + uo.FieldManager = o.FieldManager + } + if o.Raw != nil { + uo.Raw = o.Raw + } +} + +// }}} + +// {{{ Patch Options + +// PatchOptions contains options for patch requests. +type PatchOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw PatchOptions, as passed to the API server. + Raw *metav1.PatchOptions +} + +// ApplyOptions applies the given patch options on these options, +// and then returns itself (for convenient chaining). +func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions { + for _, opt := range opts { + opt.ApplyToPatch(o) + } + return o +} + +// AsPatchOptions returns these options as a metav1.PatchOptions. +// This may mutate the Raw field. 
+func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { + if o == nil { + return &metav1.PatchOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.PatchOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.Force = o.Force + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +var _ PatchOption = &PatchOptions{} + +// ApplyToPatch implements PatchOptions. +func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.Raw != nil { + po.Raw = o.Raw + } +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption. 
+func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go new file mode 100644 index 000000000..10984c534 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply Patch = applyPatch{} + + // Merge uses the raw object as a merge patch, without modifications. + // Use MergeFrom if you wish to compute a diff instead. + Merge Patch = mergePatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *patch) Data(obj Object) ([]byte, error) { + return s.data, nil +} + +// RawPatch constructs a new Patch with the given PatchType and data. 
+func RawPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. Usually this method is useful if you might have multiple clients +// acting on the same object and the same API version, but with different versions of the Go structs. +// +// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C. +// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not. +type MergeFromWithOptimisticLock struct{} + +// ApplyToMergeFrom applies this configuration to the given patch options. +func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) { + in.OptimisticLock = true +} + +// MergeFromOption is some configuration that modifies options for a merge-from patch data. +type MergeFromOption interface { + // ApplyToMergeFrom applies this configuration to the given patch options. + ApplyToMergeFrom(*MergeFromOptions) +} + +// MergeFromOptions contains options to generate a merge-from patch data. +type MergeFromOptions struct { + // OptimisticLock, when true, includes `metadata.resourceVersion` into the final + // patch data. If the `resourceVersion` field doesn't match what's stored, + // the operation results in a conflict and clients will need to try again. + OptimisticLock bool +} + +type mergeFromPatch struct { + patchType types.PatchType + createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) + from Object + opts MergeFromOptions +} + +// Type implements Patch. +func (s *mergeFromPatch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. 
+func (s *mergeFromPatch) Data(obj Object) ([]byte, error) { + original := s.from + modified := obj + + if s.opts.OptimisticLock { + version := original.GetResourceVersion() + if len(version) == 0 { + return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original) + } + + original = original.DeepCopyObject().(Object) + original.SetResourceVersion("") + + modified = modified.DeepCopyObject().(Object) + modified.SetResourceVersion(version) + } + + originalJSON, err := json.Marshal(original) + if err != nil { + return nil, err + } + + modifiedJSON, err := json.Marshal(modified) + if err != nil { + return nil, err + } + + data, err := s.createPatch(originalJSON, modifiedJSON, obj) + if err != nil { + return nil, err + } + + return data, nil +} + +func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) { + return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) +} + +func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) { + return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct) +} + +// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. 
+func MergeFrom(obj Object) Patch { + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. +// See MergeFrom for more details. +func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options} +} + +// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. +// Please note, that CRDs don't support strategic-merge-patch, see +// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility +func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options} +} + +// mergePatch uses a raw merge strategy to patch the object. +type mergePatch struct{} + +// Type implements Patch. 
+func (p mergePatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (p mergePatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} + +// applyPatch uses server-side apply to patch the object. +type applyPatch struct{} + +// Type implements Patch. +func (p applyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +// Data implements Patch. +func (p applyPatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go new file mode 100644 index 000000000..bf4b861f3 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. +type NewDelegatingClientInput struct { + CacheReader Reader + Client Client + UncachedObjects []Object + CacheUnstructured bool +} + +// NewDelegatingClient creates a new delegating client. +// +// A delegating client forms a Client by composing separate reader, writer and +// statusclient interfaces. This way, you can have an Client that reads from a +// cache and writes to the API server. +func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { + uncachedGVKs := map[schema.GroupVersionKind]struct{}{} + for _, obj := range in.UncachedObjects { + gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) + if err != nil { + return nil, err + } + uncachedGVKs[gvk] = struct{}{} + } + + return &delegatingClient{ + scheme: in.Client.Scheme(), + mapper: in.Client.RESTMapper(), + Reader: &delegatingReader{ + CacheReader: in.CacheReader, + ClientReader: in.Client, + scheme: in.Client.Scheme(), + uncachedGVKs: uncachedGVKs, + cacheUnstructured: in.CacheUnstructured, + }, + Writer: in.Client, + StatusClient: in.Client, + }, nil +} + +type delegatingClient struct { + Reader + Writer + StatusClient + + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// Scheme returns the scheme this client is using. +func (d *delegatingClient) Scheme() *runtime.Scheme { + return d.scheme +} + +// RESTMapper returns the rest mapper this client is using. 
+func (d *delegatingClient) RESTMapper() meta.RESTMapper { + return d.mapper +} + +// delegatingReader forms a Reader that will cause Get and List requests for +// unstructured types to use the ClientReader while requests for any other type +// of object with use the CacheReader. This avoids accidentally caching the +// entire cluster in the common case of loading arbitrary unstructured objects +// (e.g. from OwnerReferences). +type delegatingReader struct { + CacheReader Reader + ClientReader Reader + + uncachedGVKs map[schema.GroupVersionKind]struct{} + scheme *runtime.Scheme + cacheUnstructured bool +} + +func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, d.scheme) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := d.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !d.cacheUnstructured { + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + return isUnstructured || isUnstructuredList, nil + } + return false, nil +} + +// Get retrieves an obj for a given object key from the Kubernetes Cluster. +func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object) error { + if isUncached, err := d.shouldBypassCache(obj); err != nil { + return err + } else if isUncached { + return d.ClientReader.Get(ctx, key, obj) + } + return d.CacheReader.Get(ctx, key, obj) +} + +// List retrieves list of objects for a given namespace and list options. +func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { + if isUncached, err := d.shouldBypassCache(list); err != nil { + return err + } else if isUncached { + return d.ClientReader.List(ctx, list, opts...) 
+ } + return d.CacheReader.List(ctx, list, opts...) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go new file mode 100644 index 000000000..dde7b21f2 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -0,0 +1,205 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &typedClient{} +var _ Writer = &typedClient{} +var _ StatusWriter = &typedClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type typedClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Update implements client.Client. 
+func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Delete implements client.Client. +func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). 
+ VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client. +func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + return r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name).Do(ctx).Into(obj) +} + +// List implements client.Client. +func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// UpdateStatus used by StatusWriter to write status. +func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + // TODO(droot): examine the returned error and check if it error needs to be + // wrapped to improve the UX ? + // It will be nice to receive an error saying the object doesn't implement + // status subresource and check CRD definition + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// PatchStatus used by StatusWriter to write status. 
+func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go new file mode 100644 index 000000000..819527e70 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -0,0 +1,271 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &unstructuredClient{} +var _ Writer = &unstructuredClient{} +var _ StatusWriter = &unstructuredClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. 
+type unstructuredClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + result := o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Update implements client.Client. +func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + result := o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Delete implements client.Client. +func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + return o.Delete(). 
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client. +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + result := r.Get(). 
+ NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + + return result +} + +// List implements client.Client. +func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + u, ok := obj.(*unstructured.UnstructuredList) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + result := o.Patch(patch.Type()). 
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Do(ctx). + Into(u) + + u.SetGroupVersionKind(gvk) + return result +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go new file mode 100644 index 000000000..70490664b --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -0,0 +1,114 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +// NewWithWatch returns a new WithWatch. 
+func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { + client, err := newClient(config, options) + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + return &watchingClient{client: client, dynamic: dynamicClient}, nil +} + +type watchingClient struct { + *client + dynamic dynamic.Interface +} + +func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { + switch l := list.(type) { + case *unstructured.UnstructuredList: + return w.unstructuredWatch(ctx, l, opts...) + case *metav1.PartialObjectMetadataList: + return w.metadataWatch(ctx, l, opts...) + default: + return w.typedWatch(ctx, l, opts...) + } +} + +func (w *watchingClient) listOpts(opts ...ListOption) ListOptions { + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + if listOpts.Raw == nil { + listOpts.Raw = &metav1.ListOptions{} + } + listOpts.Raw.Watch = true + + return listOpts +} + +func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := w.listOpts(opts...) + + resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return nil, err + } + + return resInt.Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + r, err := w.client.unstructuredClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) 
+ + if listOpts.Namespace != "" && r.isNamespaced() { + return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) + } + return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.typedClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec). + Watch(ctx) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go new file mode 100644 index 000000000..4b8ee8e7c --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -0,0 +1,270 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "context" + "errors" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" +) + +// Cluster provides various methods to interact with a cluster. +type Cluster interface { + // SetFields will set any dependencies on an object for which the object has implemented the inject + // interface - e.g. inject.Client. + // Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. + SetFields(interface{}) error + + // GetConfig returns an initialized Config + GetConfig() *rest.Config + + // GetScheme returns an initialized Scheme + GetScheme() *runtime.Scheme + + // GetClient returns a client configured with the Config. This client may + // not be a fully "direct" client -- it may read from a cache, for + // instance. See Options.NewClient for more information on how the default + // implementation works. + GetClient() client.Client + + // GetFieldIndexer returns a client.FieldIndexer configured with the client + GetFieldIndexer() client.FieldIndexer + + // GetCache returns a cache.Cache + GetCache() cache.Cache + + // GetEventRecorderFor returns a new EventRecorder for the provided name + GetEventRecorderFor(name string) record.EventRecorder + + // GetRESTMapper returns a RESTMapper + GetRESTMapper() meta.RESTMapper + + // GetAPIReader returns a reader that will be configured to use the API server. + // This should be used sparingly and only when the client does not fit your + // use case. 
+ GetAPIReader() client.Reader + + // Start starts the cluster + Start(ctx context.Context) error +} + +// Options are the possible options that can be configured for a Cluster. +type Options struct { + // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. + Scheme *runtime.Scheme + + // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs + MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + + // Logger is the logger that should be used by this Cluster. + // If none is set, it defaults to log.Log global logger. + Logger logr.Logger + + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + SyncPeriod *time.Duration + + // Namespace if specified restricts the manager's cache to watch objects in + // the desired namespace Defaults to all namespaces + // + // Note: If a namespace is specified, controllers can still Watch for a + // cluster-scoped resource (e.g Node). For namespaced resources the cache + // will only hold objects from the desired namespace. + Namespace string + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + NewCache cache.NewCacheFunc + + // NewClient is the func that creates the client to be used by the manager. 
+ // If not set this will create the default DelegatingClient that will + // use the cache for reads and the client for writes. + NewClient NewClientFunc + + // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it + // for the given objects. + ClientDisableCacheFor []client.Object + + // DryRunClient specifies whether the client should be configured to enforce + // dryRun mode. + DryRunClient bool + + // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API + // Use this to customize the event correlator and spam filter + // + // Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers + // is shorter than the lifetime of your process. + EventBroadcaster record.EventBroadcaster + + // makeBroadcaster allows deferring the creation of the broadcaster to + // avoid leaking goroutines if we never call Start on this manager. It also + // returns whether or not this is a "owned" broadcaster, and as such should be + // stopped with the manager. + makeBroadcaster intrec.EventBroadcasterProducer + + // Dependency injection for testing + newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) +} + +// Option can be used to manipulate Options. +type Option func(*Options) + +// New constructs a brand new cluster. 
+func New(config *rest.Config, opts ...Option) (Cluster, error) { + if config == nil { + return nil, errors.New("must specify Config") + } + + options := Options{} + for _, opt := range opts { + opt(&options) + } + options = setOptionsDefaults(options) + + // Create the mapper provider + mapper, err := options.MapperProvider(config) + if err != nil { + options.Logger.Error(err, "Failed to get API Group-Resources") + return nil, err + } + + // Create the cache for the cached read client and registering informers + cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace}) + if err != nil { + return nil, err + } + + clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper} + + apiReader, err := client.New(config, clientOptions) + if err != nil { + return nil, err + } + + writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...) + if err != nil { + return nil, err + } + + if options.DryRunClient { + writeObj = client.NewDryRunClient(writeObj) + } + + // Create the recorder provider to inject event recorders for the components. + // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific + // to the particular controller that it's being injected into, rather than a generic one like is here. + recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) + if err != nil { + return nil, err + } + + return &cluster{ + config: config, + scheme: options.Scheme, + cache: cache, + fieldIndexes: cache, + client: writeObj, + apiReader: apiReader, + recorderProvider: recorderProvider, + mapper: mapper, + logger: options.Logger, + }, nil +} + +// setOptionsDefaults set default values for Options fields. 
+func setOptionsDefaults(options Options) Options { + // Use the Kubernetes client-go scheme if none is specified + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + if options.MapperProvider == nil { + options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { + return apiutil.NewDynamicRESTMapper(c) + } + } + + // Allow users to define how to create a new client + if options.NewClient == nil { + options.NewClient = DefaultNewClient + } + + // Allow newCache to be mocked + if options.NewCache == nil { + options.NewCache = cache.New + } + + // Allow newRecorderProvider to be mocked + if options.newRecorderProvider == nil { + options.newRecorderProvider = intrec.NewProvider + } + + // This is duplicated with pkg/manager, we need it here to provide + // the user with an EventBroadcaster and there for the Leader election + if options.EventBroadcaster == nil { + // defer initialization to avoid leaking by default + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return record.NewBroadcaster(), true + } + } else { + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return options.EventBroadcaster, false + } + } + + if options.Logger.GetSink() == nil { + options.Logger = logf.RuntimeLog.WithName("cluster") + } + + return options +} + +// NewClientFunc allows a user to define how to create a client. +type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) + +// DefaultNewClient creates the default caching client. 
+func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { + c, err := client.New(config, options) + if err != nil { + return nil, err + } + + return client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cache, + Client: c, + UncachedObjects: uncachedObjects, + }) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go new file mode 100644 index 000000000..125e1d144 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +type cluster struct { + // config is the rest.config used to talk to the apiserver. Required. + config *rest.Config + + // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults + // to scheme.scheme. 
+ scheme *runtime.Scheme + + cache cache.Cache + + // TODO(directxman12): Provide an escape hatch to get individual indexers + // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). + client client.Client + + // apiReader is the reader that will make requests to the api server and not the cache. + apiReader client.Reader + + // fieldIndexes knows how to add field indexes over the Cache used by this controller, + // which can later be consumed via field selectors from the injected client. + fieldIndexes client.FieldIndexer + + // recorderProvider is used to generate event recorders that will be injected into Controllers + // (and EventHandlers, Sources and Predicates). + recorderProvider *intrec.Provider + + // mapper is used to map resources to kind, and map kind and version. + mapper meta.RESTMapper + + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. + logger logr.Logger +} + +func (c *cluster) SetFields(i interface{}) error { + if _, err := inject.ConfigInto(c.config, i); err != nil { + return err + } + if _, err := inject.ClientInto(c.client, i); err != nil { + return err + } + if _, err := inject.APIReaderInto(c.apiReader, i); err != nil { + return err + } + if _, err := inject.SchemeInto(c.scheme, i); err != nil { + return err + } + if _, err := inject.CacheInto(c.cache, i); err != nil { + return err + } + if _, err := inject.MapperInto(c.mapper, i); err != nil { + return err + } + return nil +} + +func (c *cluster) GetConfig() *rest.Config { + return c.config +} + +func (c *cluster) GetClient() client.Client { + return c.client +} + +func (c *cluster) GetScheme() *runtime.Scheme { + return c.scheme +} + +func (c *cluster) GetFieldIndexer() client.FieldIndexer { + return c.fieldIndexes +} + +func (c *cluster) GetCache() cache.Cache { + return c.cache +} + +func (c *cluster) GetEventRecorderFor(name string) record.EventRecorder { + return 
c.recorderProvider.GetEventRecorderFor(name) +} + +func (c *cluster) GetRESTMapper() meta.RESTMapper { + return c.mapper +} + +func (c *cluster) GetAPIReader() client.Reader { + return c.apiReader +} + +func (c *cluster) GetLogger() logr.Logger { + return c.logger +} + +func (c *cluster) Start(ctx context.Context) error { + defer c.recorderProvider.Stop(ctx) + return c.cache.Start(ctx) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go new file mode 100644 index 000000000..517b172e5 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -0,0 +1,112 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "os" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" +) + +// ControllerManagerConfiguration defines the functions necessary to parse a config file +// and to configure the Options struct for the ctrl.Manager. +type ControllerManagerConfiguration interface { + runtime.Object + + // Complete returns the versioned configuration + Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) +} + +// DeferredFileLoader is used to configure the decoder for loading controller +// runtime component config types. 
+type DeferredFileLoader struct { + ControllerManagerConfiguration + path string + scheme *runtime.Scheme + once sync.Once + err error +} + +// File will set up the deferred file loader for the configuration +// this will also configure the defaults for the loader if nothing is +// +// Defaults: +// Path: "./config.yaml" +// Kind: GenericControllerManagerConfiguration +func File() *DeferredFileLoader { + scheme := runtime.NewScheme() + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + return &DeferredFileLoader{ + path: "./config.yaml", + ControllerManagerConfiguration: &v1alpha1.ControllerManagerConfiguration{}, + scheme: scheme, + } +} + +// Complete will use sync.Once to set the scheme. +func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) { + d.once.Do(d.loadFile) + if d.err != nil { + return v1alpha1.ControllerManagerConfigurationSpec{}, d.err + } + return d.ControllerManagerConfiguration.Complete() +} + +// AtPath will set the path to load the file for the decoder. +func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader { + d.path = path + return d +} + +// OfKind will set the type to be used for decoding the file into. +func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader { + d.ControllerManagerConfiguration = obj + return d +} + +// InjectScheme will configure the scheme to be used for decoding the file. +func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { + d.scheme = scheme + return nil +} + +// loadFile is used from the mutex.Once to load the file. 
+func (d *DeferredFileLoader) loadFile() { + if d.scheme == nil { + d.err = fmt.Errorf("scheme not supplied to controller configuration loader") + return + } + + content, err := os.ReadFile(d.path) + if err != nil { + d.err = fmt.Errorf("could not read file at %s", d.path) + return + } + + codecs := serializer.NewCodecFactory(d.scheme) + + // Regardless of if the bytes are of any external version, + // it will be read successfully and converted into the internal version + if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil { + d.err = fmt.Errorf("could not decode file into runtime.Object") + } +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go new file mode 100644 index 000000000..ebd8243f3 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config contains functionality for interacting with ComponentConfig +// files +// +// DeferredFileLoader +// +// This uses a deferred file decoding allowing you to chain your configuration +// setup. You can pass this into manager.Options#File and it will load your +// config. 
+package config diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go new file mode 100644 index 000000000..1e3adbafb --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 provides the ControllerManagerConfiguration used for +// configuring ctrl.Manager +// +kubebuilder:object:generate=true +package v1alpha1 diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go new file mode 100644 index 000000000..9efdbc066 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -0,0 +1,37 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + SchemeBuilder.Register(&ControllerManagerConfiguration{}) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go new file mode 100644 index 000000000..e67b62e51 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -0,0 +1,157 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1alpha1 "k8s.io/component-base/config/v1alpha1" +) + +// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. 
+type ControllerManagerConfigurationSpec struct { + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will be a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` + + // LeaderElection is the LeaderElection config to be used when configuring + // the manager.Manager leader election + // +optional + LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"` + + // CacheNamespace if specified restricts the manager's cache to watch objects in + // the desired namespace Defaults to all namespaces + // + // Note: If a namespace is specified, controllers can still Watch for a + // cluster-scoped resource (e.g. Node). For namespaced resources the cache + // will only hold objects from the desired namespace. + // +optional + CacheNamespace string `json:"cacheNamespace,omitempty"` + + // GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop. + // To disable graceful shutdown, set to time.Duration(0) + // To use graceful shutdown without timeout, set to a negative duration, e.g. time.Duration(-1) + // The graceful shutdown is skipped for safety reasons in case the leader election lease is lost. + GracefulShutdownTimeout *metav1.Duration `json:"gracefulShutDown,omitempty"` + + // Controller contains global configuration options for controllers + // registered within this manager. 
+ // +optional + Controller *ControllerConfigurationSpec `json:"controller,omitempty"` + + // Metrics contains the controller metrics configuration + // +optional + Metrics ControllerMetrics `json:"metrics,omitempty"` + + // Health contains the controller health configuration + // +optional + Health ControllerHealth `json:"health,omitempty"` + + // Webhook contains the controllers webhook configuration + // +optional + Webhook ControllerWebhook `json:"webhook,omitempty"` +} + +// ControllerConfigurationSpec defines the global configuration for +// controllers registered with the manager. +type ControllerConfigurationSpec struct { + // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation + // allowed for that controller. + // + // When a controller is registered within this manager using the builder utilities, + // users have to specify the type the controller reconciles in the For(...) call. + // If the object's kind passed matches one of the keys in this map, the concurrency + // for that controller is set to the number specified. + // + // The key is expected to be consistent in form with GroupKind.String(), + // e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`. + // + // +optional + GroupKindConcurrency map[string]int `json:"groupKindConcurrency,omitempty"` + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + // +optional + CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"` +} + +// ControllerMetrics defines the metrics configs. +type ControllerMetrics struct { + // BindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + // +optional + BindAddress string `json:"bindAddress,omitempty"` +} + +// ControllerHealth defines the health configs. 
+type ControllerHealth struct { + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // +optional + HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"` + + // ReadinessEndpointName, defaults to "readyz" + // +optional + ReadinessEndpointName string `json:"readinessEndpointName,omitempty"` + + // LivenessEndpointName, defaults to "healthz" + // +optional + LivenessEndpointName string `json:"livenessEndpointName,omitempty"` +} + +// ControllerWebhook defines the webhook server for the controller. +type ControllerWebhook struct { + // Port is the port that the webhook server serves at. + // It is used to set webhook.Server.Port. + // +optional + Port *int `json:"port,omitempty"` + + // Host is the hostname that the webhook server binds to. + // It is used to set webhook.Server.Host. + // +optional + Host string `json:"host,omitempty"` + + // CertDir is the directory that contains the server key and certificate. + // if not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate + // must be named tls.key and tls.crt, respectively. + // +optional + CertDir string `json:"certDir,omitempty"` +} + +// +kubebuilder:object:root=true + +// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. +type ControllerManagerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // ControllerManagerConfiguration returns the configurations for controllers + ControllerManagerConfigurationSpec `json:",inline"` +} + +// Complete returns the configuration for controller-runtime. 
+func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { + return *c, nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..5329bef66 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,153 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + configv1alpha1 "k8s.io/component-base/config/v1alpha1" + timex "time" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerConfigurationSpec) DeepCopyInto(out *ControllerConfigurationSpec) { + *out = *in + if in.GroupKindConcurrency != nil { + in, out := &in.GroupKindConcurrency, &out.GroupKindConcurrency + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CacheSyncTimeout != nil { + in, out := &in.CacheSyncTimeout, &out.CacheSyncTimeout + *out = new(timex.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigurationSpec. +func (in *ControllerConfigurationSpec) DeepCopy() *ControllerConfigurationSpec { + if in == nil { + return nil + } + out := new(ControllerConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth. +func (in *ControllerHealth) DeepCopy() *ControllerHealth { + if in == nil { + return nil + } + out := new(ControllerHealth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerManagerConfiguration) DeepCopyInto(out *ControllerManagerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfiguration. +func (in *ControllerManagerConfiguration) DeepCopy() *ControllerManagerConfiguration { + if in == nil { + return nil + } + out := new(ControllerManagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerManagerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerManagerConfigurationSpec) DeepCopyInto(out *ControllerManagerConfigurationSpec) { + *out = *in + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(configv1alpha1.LeaderElectionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.GracefulShutdownTimeout != nil { + in, out := &in.GracefulShutdownTimeout, &out.GracefulShutdownTimeout + *out = new(v1.Duration) + **out = **in + } + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(ControllerConfigurationSpec) + (*in).DeepCopyInto(*out) + } + out.Metrics = in.Metrics + out.Health = in.Health + in.Webhook.DeepCopyInto(&out.Webhook) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfigurationSpec. +func (in *ControllerManagerConfigurationSpec) DeepCopy() *ControllerManagerConfigurationSpec { + if in == nil { + return nil + } + out := new(ControllerManagerConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics. +func (in *ControllerMetrics) DeepCopy() *ControllerMetrics { + if in == nil { + return nil + } + out := new(ControllerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerWebhook) DeepCopyInto(out *ControllerWebhook) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerWebhook. +func (in *ControllerWebhook) DeepCopy() *ControllerWebhook { + if in == nil { + return nil + } + out := new(ControllerWebhook) + in.DeepCopyInto(out) + return out +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go new file mode 100644 index 000000000..8e3d8591d --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -0,0 +1,155 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/internal/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// Options are the arguments for creating a new Controller. 
+type Options struct { + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // Reconciler reconciles an object + Reconciler reconcile.Reconciler + + // RateLimiter is used to limit how frequently requests may be queued. + // Defaults to MaxOfRateLimiter which has both overall and per-item rate limiting. + // The overall is a token bucket and the per-item is exponential. + RateLimiter ratelimiter.RateLimiter + + // LogConstructor is used to construct a logger used for this controller and passed + // to each reconciliation via the context field. + LogConstructor func(request *reconcile.Request) logr.Logger + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + RecoverPanic bool +} + +// Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests +// from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item. +// Work typically is reads and writes Kubernetes objects to make the system state match the state specified +// in the object Spec. +type Controller interface { + // Reconciler is called to reconcile an object by Namespace/Name + reconcile.Reconciler + + // Watch takes events provided by a Source and uses the EventHandler to + // enqueue reconcile.Requests in response to the events. + // + // Watch may be provided one or more Predicates to filter events before + // they are given to the EventHandler. Events will be passed to the + // EventHandler if all provided Predicates evaluate to true. + Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error + + // Start starts the controller. Start blocks until the context is closed or a + // controller has an error starting. 
+ Start(ctx context.Context) error + + // GetLogger returns this controller logger prefilled with basic information. + GetLogger() logr.Logger +} + +// New returns a new Controller registered with the Manager. The Manager will ensure that shared Caches have +// been synced before the Controller is Started. +func New(name string, mgr manager.Manager, options Options) (Controller, error) { + c, err := NewUnmanaged(name, mgr, options) + if err != nil { + return nil, err + } + + // Add the controller as a Manager components + return c, mgr.Add(c) +} + +// NewUnmanaged returns a new controller without adding it to the manager. The +// caller is responsible for starting the returned controller. +func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller, error) { + if options.Reconciler == nil { + return nil, fmt.Errorf("must specify Reconciler") + } + + if len(name) == 0 { + return nil, fmt.Errorf("must specify Name for Controller") + } + + if options.LogConstructor == nil { + log := mgr.GetLogger().WithValues( + "controller", name, + ) + options.LogConstructor = func(req *reconcile.Request) logr.Logger { + log := log + if req != nil { + log = log.WithValues( + "object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + ) + } + return log + } + } + + if options.MaxConcurrentReconciles <= 0 { + options.MaxConcurrentReconciles = 1 + } + + if options.CacheSyncTimeout == 0 { + options.CacheSyncTimeout = 2 * time.Minute + } + + if options.RateLimiter == nil { + options.RateLimiter = workqueue.DefaultControllerRateLimiter() + } + + // Inject dependencies into Reconciler + if err := mgr.SetFields(options.Reconciler); err != nil { + return nil, err + } + + // Create controller with dependencies set + return &controller.Controller{ + Do: options.Reconciler, + MakeQueue: func() workqueue.RateLimitingInterface { + return workqueue.NewNamedRateLimitingQueue(options.RateLimiter, name) + }, + MaxConcurrentReconciles: 
options.MaxConcurrentReconciles, + CacheSyncTimeout: options.CacheSyncTimeout, + SetFields: mgr.SetFields, + Name: name, + LogConstructor: options.LogConstructor, + RecoverPanic: options.RecoverPanic, + }, nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go new file mode 100644 index 000000000..aa53a77d4 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -0,0 +1,394 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllerutil + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// AlreadyOwnedError is an error returned if the object you are trying to assign +// a controller reference is already owned by another controller Object is the +// subject and Owner is the reference for the current owner. 
+type AlreadyOwnedError struct { + Object metav1.Object + Owner metav1.OwnerReference +} + +func (e *AlreadyOwnedError) Error() string { + return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name) +} + +func newAlreadyOwnedError(obj metav1.Object, owner metav1.OwnerReference) *AlreadyOwnedError { + return &AlreadyOwnedError{ + Object: obj, + Owner: owner, + } +} + +// SetControllerReference sets owner as a Controller OwnerReference on controlled. +// This is used for garbage collection of the controlled object and for +// reconciling the owner object on changes to controlled (with a Watch + EnqueueRequestForOwner). +// Since only one OwnerReference can be a controller, it returns an error if +// there is another OwnerReference with Controller flag set. +func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme) error { + // Validate the owner. + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) + } + if err := validateOwner(owner, controlled); err != nil { + return err + } + + // Create a new controller ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: pointer.BoolPtr(true), + Controller: pointer.BoolPtr(true), + } + + // Return early with an error if the object is already controlled. + if existing := metav1.GetControllerOf(controlled); existing != nil && !referSameObject(*existing, ref) { + return newAlreadyOwnedError(controlled, *existing) + } + + // Update owner references and return. + upsertOwnerRef(ref, controlled) + return nil +} + +// SetOwnerReference is a helper method to make sure the given object contains an object reference to the object provided. 
+// This allows you to declare that owner has a dependency on the object without specifying it as a controller. +// If a reference to the same object already exists, it'll be overwritten with the newly provided version. +func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + // Validate the owner. + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetOwnerReference", owner) + } + if err := validateOwner(owner, object); err != nil { + return err + } + + // Create a new owner ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + UID: owner.GetUID(), + Name: owner.GetName(), + } + + // Update owner references and return. + upsertOwnerRef(ref, object) + return nil +} + +func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { + owners := object.GetOwnerReferences() + if idx := indexOwnerRef(owners, ref); idx == -1 { + owners = append(owners, ref) + } else { + owners[idx] = ref + } + object.SetOwnerReferences(owners) +} + +// indexOwnerRef returns the index of the owner reference in the slice if found, or -1. 
+func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { + for index, r := range ownerReferences { + if referSameObject(r, ref) { + return index + } + } + return -1 +} + +func validateOwner(owner, object metav1.Object) error { + ownerNs := owner.GetNamespace() + if ownerNs != "" { + objNs := object.GetNamespace() + if objNs == "" { + return fmt.Errorf("cluster-scoped resource must not have a namespace-scoped owner, owner's namespace %s", ownerNs) + } + if ownerNs != objNs { + return fmt.Errorf("cross-namespace owner references are disallowed, owner's namespace %s, obj's namespace %s", owner.GetNamespace(), object.GetNamespace()) + } + } + return nil +} + +// Returns true if a and b point to the same object. +func referSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name +} + +// OperationResult is the action result of a CreateOrUpdate call. +type OperationResult string + +const ( // They should complete the sentence "Deployment default/foo has been ..." + // OperationResultNone means that the resource has not been changed. + OperationResultNone OperationResult = "unchanged" + // OperationResultCreated means that a new resource is created. + OperationResultCreated OperationResult = "created" + // OperationResultUpdated means that an existing resource is updated. + OperationResultUpdated OperationResult = "updated" + // OperationResultUpdatedStatus means that an existing resource and its status is updated. + OperationResultUpdatedStatus OperationResult = "updatedStatus" + // OperationResultUpdatedStatusOnly means that only an existing status is updated. 
+ OperationResultUpdatedStatusOnly OperationResult = "updatedStatusOnly" +) + +// CreateOrUpdate creates or updates the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the existing +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. +func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + existing := obj.DeepCopyObject() //nolint + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + + if equality.Semantic.DeepEqual(existing, obj) { + return OperationResultNone, nil + } + + if err := c.Update(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultUpdated, nil +} + +// CreateOrPatch creates or patches the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the before +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. 
+func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + // Create patches for the object and its possible status. + objPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + statusPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + + // Create a copy of the original object as well as converting that copy to + // unstructured data. + before, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject()) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + beforeStatus, hasBeforeStatus, err := unstructured.NestedFieldCopy(before, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. + if hasBeforeStatus { + unstructured.RemoveNestedField(before, "status") + } + + // Mutate the original object. + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + + // Convert the resource to unstructured to compare against our before copy. 
+ after, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + afterStatus, hasAfterStatus, err := unstructured.NestedFieldCopy(after, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. + if hasAfterStatus { + unstructured.RemoveNestedField(after, "status") + } + + result := OperationResultNone + + if !reflect.DeepEqual(before, after) { + // Only issue a Patch if the before and after resources (minus status) differ + if err := c.Patch(ctx, obj, objPatch); err != nil { + return result, err + } + result = OperationResultUpdated + } + + if (hasBeforeStatus || hasAfterStatus) && !reflect.DeepEqual(beforeStatus, afterStatus) { + // Only issue a Status Patch if the resource has a status and the beforeStatus + // and afterStatus copies differ + if result == OperationResultUpdated { + // If Status was replaced by Patch before, set it to afterStatus + objectAfterPatch, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return result, err + } + if err = unstructured.SetNestedField(objectAfterPatch, afterStatus, "status"); err != nil { + return result, err + } + // If Status was replaced by Patch before, restore patched structure to the obj + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(objectAfterPatch, obj); err != nil { + return result, err + } + } + if err := c.Status().Patch(ctx, obj, statusPatch); err != nil { + return result, err + } + if result == OperationResultUpdated { + result = OperationResultUpdatedStatus + } else { + result = OperationResultUpdatedStatusOnly + } + } + + return result, nil +} + +// mutate wraps a MutateFn and applies validation to its result. 
+func mutate(f MutateFn, key client.ObjectKey, obj client.Object) error { + if err := f(); err != nil { + return err + } + if newKey := client.ObjectKeyFromObject(obj); key != newKey { + return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace") + } + return nil +} + +// MutateFn is a function which mutates the existing object into its desired state. +type MutateFn func() error + +// AddFinalizer accepts an Object and adds the provided finalizer if not present. +// It returns an indication of whether it updated the object's list of finalizers. +func AddFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return false + } + } + o.SetFinalizers(append(f, finalizer)) + return true +} + +// RemoveFinalizer accepts an Object and removes the provided finalizer if present. +// It returns an indication of whether it updated the object's list of finalizers. +func RemoveFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { + f := o.GetFinalizers() + for i := 0; i < len(f); i++ { + if f[i] == finalizer { + f = append(f[:i], f[i+1:]...) + i-- + finalizersUpdated = true + } + } + o.SetFinalizers(f) + return +} + +// ContainsFinalizer checks an Object that the provided finalizer is present. +func ContainsFinalizer(o client.Object, finalizer string) bool { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// Object allows functions to work indistinctly with any resource that +// implements both Object interfaces. +// +// Deprecated: Use client.Object instead. 
+type Object = client.Object diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go new file mode 100644 index 000000000..ab386b29c --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package controllerutil contains utility functions for working with and implementing Controllers. +*/ +package controllerutil diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go new file mode 100644 index 000000000..667b14fdd --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package controller provides types and functions for building Controllers. Controllers implement Kubernetes APIs. + +Creation + +To create a new Controller, first create a manager.Manager and pass it to the controller.New function. +The Controller MUST be started by calling Manager.Start. +*/ +package controller diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go new file mode 100644 index 000000000..da32ab48e --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides interface definitions that an API Type needs to +implement for it to be supported by the generic conversion webhook handler +defined under pkg/webhook/conversion. +*/ +package conversion + +import "k8s.io/apimachinery/pkg/runtime" + +// Convertible defines capability of a type to convertible i.e. it can be converted to/from a hub type. +type Convertible interface { + runtime.Object + ConvertTo(dst Hub) error + ConvertFrom(src Hub) error +} + +// Hub marks that a given type is the hub type for conversion. 
This means that +// all conversions will first convert to the hub type, then convert from the hub +// type to the destination type. All types besides the hub type should implement +// Convertible. +type Hub interface { + runtime.Object + Hub() +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go new file mode 100644 index 000000000..adba3bbc1 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package event contains the definitions for the Event types produced by source.Sources and transformed into +reconcile.Requests by handler.EventHandler. + +You should rarely need to work with these directly -- instead, use Controller.Watch with +source.Sources and handler.EventHandlers. + +Events generally contain both a full runtime.Object that caused the event, as well +as a direct handle to that object's metadata. This saves a lot of typecasting in +code that works with Events. +*/ +package event diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go new file mode 100644 index 000000000..271b3c00f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package event + +import "sigs.k8s.io/controller-runtime/pkg/client" + +// CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type CreateEvent struct { + // Object is the object from the event + Object client.Object +} + +// UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type UpdateEvent struct { + // ObjectOld is the object from the event + ObjectOld client.Object + + // ObjectNew is the object from the event + ObjectNew client.Object +} + +// DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type DeleteEvent struct { + // Object is the object from the event + Object client.Object + + // DeleteStateUnknown is true if the Delete event was missed but we identified the object + // as having been deleted. + DeleteStateUnknown bool +} + +// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster). +// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an +// handler.EventHandler. 
+type GenericEvent struct { + // Object is the object from the event + Object client.Object +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go new file mode 100644 index 000000000..3b5b79048 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package handler defines EventHandlers that enqueue reconcile.Requests in response to Create, Update, Deletion Events +observed from Watching Kubernetes APIs. Users should provide a source.Source and handler.EventHandler to +Controller.Watch in order to generate and enqueue reconcile.Request work items. + +Generally, following premade event handlers should be sufficient for most use cases: + +EventHandlers + +EnqueueRequestForObject - Enqueues a reconcile.Request containing the Name and Namespace of the object in the Event. This will +cause the object that was the source of the Event (e.g. the created / deleted / updated object) to be +reconciled. + +EnqueueRequestForOwner - Enqueues a reconcile.Request containing the Name and Namespace of the Owner of the object in the Event. +This will cause owner of the object that was the source of the Event (e.g. the owner object that created the object) +to be reconciled. 
+ +EnqueueRequestsFromMapFunc - Enqueues reconcile.Requests resulting from a user provided transformation function run against the +object in the Event. This will cause an arbitrary collection of objects (defined from a transformation of the +source object) to be reconciled. +*/ +package handler diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go new file mode 100644 index 000000000..e6d3a4eaa --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var enqueueLog = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForObject") + +type empty struct{} + +var _ EventHandler = &EnqueueRequestForObject{} + +// EnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event. +// (e.g. the created / deleted / updated objects Name and Namespace). handler.EnqueueRequestForObject is used by almost all +// Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource. 
+type EnqueueRequestForObject struct{} + +// Create implements EventHandler. +func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} + +// Update implements EventHandler. +func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + switch { + case evt.ObjectNew != nil: + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.ObjectNew.GetName(), + Namespace: evt.ObjectNew.GetNamespace(), + }}) + case evt.ObjectOld != nil: + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.ObjectOld.GetName(), + Namespace: evt.ObjectOld.GetNamespace(), + }}) + default: + enqueueLog.Error(nil, "UpdateEvent received with no metadata", "event", evt) + } +} + +// Delete implements EventHandler. +func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} + +// Generic implements EventHandler. 
+func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go new file mode 100644 index 000000000..17401b1fd --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -0,0 +1,97 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +// MapFunc is the signature required for enqueueing requests from a generic function. +// This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. +type MapFunc func(client.Object) []reconcile.Request + +// EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection +// of reconcile.Requests on each Event. 
The reconcile.Requests may be for an arbitrary set of objects +// defined by some user specified transformation of the source Event. (e.g. trigger Reconciler for a set of objects +// in response to a cluster resize event caused by adding or deleting a Node) +// +// EnqueueRequestsFromMapFunc is frequently used to fan-out updates from one object to one or more other +// objects of a differing type. +// +// For UpdateEvents which contain both a new and old object, the transformation function is run on both +// objects and both sets of Requests are enqueue. +func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler { + return &enqueueRequestsFromMapFunc{ + toRequests: fn, + } +} + +var _ EventHandler = &enqueueRequestsFromMapFunc{} + +type enqueueRequestsFromMapFunc struct { + // Mapper transforms the argument into a slice of keys to be reconciled + toRequests MapFunc +} + +// Create implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +// Update implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.ObjectOld, reqs) + e.mapAndEnqueue(q, evt.ObjectNew, reqs) +} + +// Delete implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +// Generic implements EventHandler. 
+func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { + for _, req := range e.toRequests(object) { + _, ok := reqs[req] + if !ok { + q.Add(req) + reqs[req] = empty{} + } + } +} + +// EnqueueRequestsFromMapFunc can inject fields into the mapper. + +// InjectFunc implements inject.Injector. +func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { + if f == nil { + return nil + } + return f(e.toRequests) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go new file mode 100644 index 000000000..63699893f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -0,0 +1,189 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handler + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var _ EventHandler = &EnqueueRequestForOwner{} + +var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") + +// EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created +// the object that was the source of the Event. +// +// If a ReplicaSet creates Pods, users may reconcile the ReplicaSet in response to Pod Events using: +// +// - a source.Kind Source with Type of Pod. +// +// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true. +type EnqueueRequestForOwner struct { + // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + OwnerType runtime.Object + + // IsController if set will only look at the first OwnerReference with Controller: true. + IsController bool + + // groupKind is the cached Group and Kind from OwnerType + groupKind schema.GroupKind + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper +} + +// Create implements EventHandler. +func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Update implements EventHandler. 
+func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.ObjectOld, reqs) + e.getOwnerReconcileRequest(evt.ObjectNew, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Delete implements EventHandler. +func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Generic implements EventHandler. +func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false +// if the OwnerType could not be parsed using the scheme. +func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { + // Get the kinds of the type + kinds, _, err := scheme.ObjectKinds(e.OwnerType) + if err != nil { + log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType)) + return err + } + // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. + if len(kinds) != 1 { + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + return err + } + // Cache the Group and Kind for the OwnerType + e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} + return nil +} + +// getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile +// owners of object that match e.OwnerType. 
+func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { + // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested + // by the user + for _, ref := range e.getOwnersReferences(object) { + // Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType + refGV, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + log.Error(err, "Could not parse OwnerReference APIVersion", + "api version", ref.APIVersion) + return + } + + // Compare the OwnerReference Group and Kind against the OwnerType Group and Kind specified by the user. + // If the two match, create a Request for the objected referred to by + // the OwnerReference. Use the Name from the OwnerReference and the Namespace from the + // object in the event. + if ref.Kind == e.groupKind.Kind && refGV.Group == e.groupKind.Group { + // Match found - add a Request for the object referred to in the OwnerReference + request := reconcile.Request{NamespacedName: types.NamespacedName{ + Name: ref.Name, + }} + + // if owner is not namespaced then we should set the namespace to the empty + mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) + if err != nil { + log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) + return + } + if mapping.Scope.Name() != meta.RESTScopeNameRoot { + request.Namespace = object.GetNamespace() + } + + result[request] = empty{} + } + } +} + +// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner +// - if IsController is true: only take the Controller OwnerReference (if found) +// - if IsController is false: take all OwnerReferences. 
+func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { + if object == nil { + return nil + } + + // If not filtered as Controller only, then use all the OwnerReferences + if !e.IsController { + return object.GetOwnerReferences() + } + // If filtered to a Controller, only take the Controller OwnerReference + if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { + return []metav1.OwnerReference{*ownerRef} + } + // No Controller OwnerReference found + return nil +} + +var _ inject.Scheme = &EnqueueRequestForOwner{} + +// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner. +func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { + return e.parseOwnerTypeGroupKind(s) +} + +var _ inject.Mapper = &EnqueueRequestForOwner{} + +// InjectMapper is called by the Controller to provide the rest mapper used by the manager. +func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { + e.mapper = m + return nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go new file mode 100644 index 000000000..8652d22d7 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -0,0 +1,104 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handler + +import ( + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +// EventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). EventHandlers map an Event +// for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an +// Event for object with type Foo (using source.KindSource) then reconcile one or more object(s) with type Bar. +// +// Identical reconcile.Requests will be batched together through the queuing mechanism before reconcile is called. +// +// * Use EnqueueRequestForObject to reconcile the object the event is for +// - do this for events for the type the Controller Reconciles. (e.g. Deployment for a Deployment Controller) +// +// * Use EnqueueRequestForOwner to reconcile the owner of the object the event is for +// - do this for events for the types the Controller creates. (e.g. ReplicaSets created by a Deployment Controller) +// +// * Use EnqueueRequestsFromMapFunc to transform an event for an object to a reconcile of an object +// of a different type - do this for events for types the Controller may be interested in, but doesn't create. +// (e.g. If Foo responds to cluster size events, map Node events to Foo objects.) +// +// Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface. +// Most users shouldn't need to implement their own EventHandler. +type EventHandler interface { + // Create is called in response to an create event - e.g. Pod Creation. + Create(event.CreateEvent, workqueue.RateLimitingInterface) + + // Update is called in response to an update event - e.g. Pod Updated. + Update(event.UpdateEvent, workqueue.RateLimitingInterface) + + // Delete is called in response to a delete event - e.g. Pod Deleted. 
+ Delete(event.DeleteEvent, workqueue.RateLimitingInterface) + + // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or + // external trigger request - e.g. reconcile Autoscaling, or a Webhook. + Generic(event.GenericEvent, workqueue.RateLimitingInterface) +} + +var _ EventHandler = Funcs{} + +// Funcs implements EventHandler. +type Funcs struct { + // Create is called in response to an add event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface) + + // Update is called in response to an update event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface) + + // Delete is called in response to a delete event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface) + + // GenericFunc is called in response to a generic event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) +} + +// Create implements EventHandler. +func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { + if h.CreateFunc != nil { + h.CreateFunc(e, q) + } +} + +// Delete implements EventHandler. +func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + if h.DeleteFunc != nil { + h.DeleteFunc(e, q) + } +} + +// Update implements EventHandler. +func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + if h.UpdateFunc != nil { + h.UpdateFunc(e, q) + } +} + +// Generic implements EventHandler. 
+func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { + if h.GenericFunc != nil { + h.GenericFunc(e, q) + } +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go new file mode 100644 index 000000000..9827eeafe --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package healthz contains helpers from supporting liveness and readiness endpoints. +// (often referred to as healthz and readyz, respectively). +// +// This package draws heavily from the apiserver's healthz package +// ( https://github.com/kubernetes/apiserver/tree/master/pkg/server/healthz ) +// but has some changes to bring it in line with controller-runtime's style. +// +// The main entrypoint is the Handler -- this serves both aggregated health status +// and individual health check endpoints. 
+package healthz + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("healthz") diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go new file mode 100644 index 000000000..bd1cc151a --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go @@ -0,0 +1,206 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz + +import ( + "fmt" + "net/http" + "path" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Handler is an http.Handler that aggregates the results of the given +// checkers to the root path, and supports calling individual checkers on +// subpaths of the name of the checker. +// +// Adding checks on the fly is *not* threadsafe -- use a wrapper. +type Handler struct { + Checks map[string]Checker +} + +// checkStatus holds the output of a particular check. +type checkStatus struct { + name string + healthy bool + excluded bool +} + +func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) { + failed := false + excluded := getExcludedChecks(req) + + parts := make([]checkStatus, 0, len(h.Checks)) + + // calculate the results... 
+ for checkName, check := range h.Checks { + // no-op the check if we've specified we want to exclude the check + if excluded.Has(checkName) { + excluded.Delete(checkName) + parts = append(parts, checkStatus{name: checkName, healthy: true, excluded: true}) + continue + } + if err := check(req); err != nil { + log.V(1).Info("healthz check failed", "checker", checkName, "error", err) + parts = append(parts, checkStatus{name: checkName, healthy: false}) + failed = true + } else { + parts = append(parts, checkStatus{name: checkName, healthy: true}) + } + } + + // ...default a check if none is present... + if len(h.Checks) == 0 { + parts = append(parts, checkStatus{name: "ping", healthy: true}) + } + + for _, c := range excluded.List() { + log.V(1).Info("cannot exclude health check, no matches for it", "checker", c) + } + + // ...sort to be consistent... + sort.Slice(parts, func(i, j int) bool { return parts[i].name < parts[j].name }) + + // ...and write out the result + // TODO(directxman12): this should also accept a request for JSON content (via a accept header) + _, forceVerbose := req.URL.Query()["verbose"] + writeStatusesAsText(resp, parts, excluded, failed, forceVerbose) +} + +// writeStatusAsText writes out the given check statuses in some semi-arbitrary +// bespoke text format that we copied from Kubernetes. unknownExcludes lists +// any checks that the user requested to have excluded, but weren't actually +// known checks. writeStatusAsText is always verbose on failure, and can be +// forced to be verbose on success using the given argument. 
+func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.String, failed, forceVerbose bool) { + resp.Header().Set("Content-Type", "text/plain; charset=utf-8") + resp.Header().Set("X-Content-Type-Options", "nosniff") + + // always write status code first + if failed { + resp.WriteHeader(http.StatusInternalServerError) + } else { + resp.WriteHeader(http.StatusOK) + } + + // shortcut for easy non-verbose success + if !failed && !forceVerbose { + fmt.Fprint(resp, "ok") + return + } + + // we're always verbose on failure, so from this point on we're guaranteed to be verbose + + for _, checkOut := range parts { + switch { + case checkOut.excluded: + fmt.Fprintf(resp, "[+]%s excluded: ok\n", checkOut.name) + case checkOut.healthy: + fmt.Fprintf(resp, "[+]%s ok\n", checkOut.name) + default: + // don't include the error since this endpoint is public. If someone wants more detail + // they should have explicit permission to the detailed checks. + fmt.Fprintf(resp, "[-]%s failed: reason withheld\n", checkOut.name) + } + } + + if unknownExcludes.Len() > 0 { + fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.List()...)) + } + + if failed { + log.Info("healthz check failed", "statuses", parts) + fmt.Fprintf(resp, "healthz check failed\n") + } else { + fmt.Fprint(resp, "healthz check passed\n") + } +} + +func (h *Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + // clean up the request (duplicating the internal logic of http.ServeMux a bit) + // clean up the path a bit + reqPath := req.URL.Path + if reqPath == "" || reqPath[0] != '/' { + reqPath = "/" + reqPath + } + // path.Clean removes the trailing slash except for root for us + // (which is fine, since we're only serving one layer of sub-paths) + reqPath = path.Clean(reqPath) + + // either serve the root endpoint... 
+ if reqPath == "/" { + h.serveAggregated(resp, req) + return + } + + // ...the default check (if nothing else is present)... + if len(h.Checks) == 0 && reqPath[1:] == "ping" { + CheckHandler{Checker: Ping}.ServeHTTP(resp, req) + return + } + + // ...or an individual checker + checkName := reqPath[1:] // ignore the leading slash + checker, known := h.Checks[checkName] + if !known { + http.NotFoundHandler().ServeHTTP(resp, req) + return + } + + CheckHandler{Checker: checker}.ServeHTTP(resp, req) +} + +// CheckHandler is an http.Handler that serves a health check endpoint at the root path, +// based on its checker. +type CheckHandler struct { + Checker +} + +func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + if err := h.Checker(req); err != nil { + http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) + } else { + fmt.Fprint(resp, "ok") + } +} + +// Checker knows how to perform a health check. +type Checker func(req *http.Request) error + +// Ping returns true automatically when checked. +var Ping Checker = func(_ *http.Request) error { return nil } + +// getExcludedChecks extracts the health check names to be excluded from the query param. +func getExcludedChecks(r *http.Request) sets.String { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.NewString(checks...) + } + return sets.NewString() +} + +// formatQuoted returns a formatted string of the health check names, +// preserving the order passed in. 
+func formatQuoted(names ...string) string { + quoted := make([]string, 0, len(names)) + for _, name := range names { + quoted = append(quoted, fmt.Sprintf("%q", name)) + } + return strings.Join(quoted, ",") +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go new file mode 100644 index 000000000..3732eea16 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -0,0 +1,360 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/go-logr/logr" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/handler" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var _ inject.Injector = &Controller{} + +// Controller implements controller.Controller. +type Controller struct { + // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. 
+ Name string + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // Reconciler is a function that can be called at any time with the Name / Namespace of an object and + // ensures that the state of the system matches the state specified in the object. + // Defaults to the DefaultReconcileFunc. + Do reconcile.Reconciler + + // MakeQueue constructs the queue for this controller once the controller is ready to start. + // This exists because the standard Kubernetes workqueues start themselves immediately, which + // leads to goroutine leaks if something calls controller.New repeatedly. + MakeQueue func() workqueue.RateLimitingInterface + + // Queue is an listeningQueue that listens for events from Informers and adds object keys to + // the Queue for processing + Queue workqueue.RateLimitingInterface + + // SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates + // Deprecated: the caller should handle injected fields itself. + SetFields func(i interface{}) error + + // mu is used to synchronize Controller setup + mu sync.Mutex + + // Started is true if the Controller has been Started + Started bool + + // ctx is the context that was passed to Start() and used when starting watches. + // + // According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context, + // while we usually always strive to follow best practices, we consider this a legacy case and it should + // undergo a major refactoring and redesign to allow for context to not be stored in a struct. + ctx context.Context + + // CacheSyncTimeout refers to the time limit set on waiting for cache to sync + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // startWatches maintains a list of sources, handlers, and predicates to start when the controller is started. 
+ startWatches []watchDescription + + // LogConstructor is used to construct a logger to then log messages to users during reconciliation, + // or for example when a watch is started. + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of a reconciliation. + LogConstructor func(request *reconcile.Request) logr.Logger + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + RecoverPanic bool +} + +// watchDescription contains all the information necessary to start a watch. +type watchDescription struct { + src source.Source + handler handler.EventHandler + predicates []predicate.Predicate +} + +// Reconcile implements reconcile.Reconciler. +func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { + defer func() { + if r := recover(); r != nil { + if c.RecoverPanic { + for _, fn := range utilruntime.PanicHandlers { + fn(r) + } + err = fmt.Errorf("panic: %v [recovered]", r) + return + } + + log := logf.FromContext(ctx) + log.Info(fmt.Sprintf("Observed a panic in reconciler: %v", r)) + panic(r) + } + }() + return c.Do.Reconcile(ctx, req) +} + +// Watch implements controller.Controller. +func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error { + c.mu.Lock() + defer c.mu.Unlock() + + // Inject Cache into arguments + if err := c.SetFields(src); err != nil { + return err + } + if err := c.SetFields(evthdler); err != nil { + return err + } + for _, pr := range prct { + if err := c.SetFields(pr); err != nil { + return err + } + } + + // Controller hasn't started yet, store the watches locally and return. + // + // These watches are going to be held on the controller struct until the manager or user calls Start(...). 
+ if !c.Started { + c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct}) + return nil + } + + c.LogConstructor(nil).Info("Starting EventSource", "source", src) + return src.Start(c.ctx, evthdler, c.Queue, prct...) +} + +// Start implements controller.Controller. +func (c *Controller) Start(ctx context.Context) error { + // use an IIFE to get proper lock handling + // but lock outside to get proper handling of the queue shutdown + c.mu.Lock() + if c.Started { + return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times") + } + + c.initMetrics() + + // Set the internal context. + c.ctx = ctx + + c.Queue = c.MakeQueue() + go func() { + <-ctx.Done() + c.Queue.ShutDown() + }() + + wg := &sync.WaitGroup{} + err := func() error { + defer c.mu.Unlock() + + // TODO(pwittrock): Reconsider HandleCrash + defer utilruntime.HandleCrash() + + // NB(directxman12): launch the sources *before* trying to wait for the + // caches to sync so that they have a chance to register their intendeded + // caches. + for _, watch := range c.startWatches { + c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src)) + + if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil { + return err + } + } + + // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches + c.LogConstructor(nil).Info("Starting Controller") + + for _, watch := range c.startWatches { + syncingSource, ok := watch.src.(source.SyncingSource) + if !ok { + continue + } + + if err := func() error { + // use a context with timeout for launching sources and syncing caches. 
+ sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout) + defer cancel() + + // WaitForSync waits for a definitive timeout, and returns if there + // is an error or a timeout + if err := syncingSource.WaitForSync(sourceStartCtx); err != nil { + err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err) + c.LogConstructor(nil).Error(err, "Could not wait for Cache to sync") + return err + } + + return nil + }(); err != nil { + return err + } + } + + // All the watches have been started, we can reset the local slice. + // + // We should never hold watches more than necessary, each watch source can hold a backing cache, + // which won't be garbage collected if we hold a reference to it. + c.startWatches = nil + + // Launch workers to process resources + c.LogConstructor(nil).Info("Starting workers", "worker count", c.MaxConcurrentReconciles) + wg.Add(c.MaxConcurrentReconciles) + for i := 0; i < c.MaxConcurrentReconciles; i++ { + go func() { + defer wg.Done() + // Run a worker thread that just dequeues items, processes them, and marks them done. + // It enforces that the reconcileHandler is never invoked concurrently with the same object. + for c.processNextWorkItem(ctx) { + } + }() + } + + c.Started = true + return nil + }() + if err != nil { + return err + } + + <-ctx.Done() + c.LogConstructor(nil).Info("Shutdown signal received, waiting for all workers to finish") + wg.Wait() + c.LogConstructor(nil).Info("All workers finished") + return nil +} + +// processNextWorkItem will read a single work item off the workqueue and +// attempt to process it, by calling the reconcileHandler. +func (c *Controller) processNextWorkItem(ctx context.Context) bool { + obj, shutdown := c.Queue.Get() + if shutdown { + // Stop working + return false + } + + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. 
For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.Queue.Done(obj) + + ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(1) + defer ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(-1) + + c.reconcileHandler(ctx, obj) + return true +} + +const ( + labelError = "error" + labelRequeueAfter = "requeue_after" + labelRequeue = "requeue" + labelSuccess = "success" +) + +func (c *Controller) initMetrics() { + ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0) + ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0) + ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles)) +} + +func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { + // Update metrics after processing each item + reconcileStartTS := time.Now() + defer func() { + c.updateMetrics(time.Since(reconcileStartTS)) + }() + + // Make sure that the object is a valid request. + req, ok := obj.(reconcile.Request) + if !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.Queue.Forget(obj) + c.LogConstructor(nil).Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj) + // Return true, don't take a break + return + } + + log := c.LogConstructor(&req) + + log = log.WithValues("reconcileID", uuid.NewUUID()) + ctx = logf.IntoContext(ctx, log) + + // RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the + // resource to be synced. 
+ result, err := c.Reconcile(ctx, req) + switch { + case err != nil: + c.Queue.AddRateLimited(req) + ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() + log.Error(err, "Reconciler error") + case result.RequeueAfter > 0: + // The result.RequeueAfter request will be lost, if it is returned + // along with a non-nil error. But this is intended as + // We need to drive to stable reconcile loops before queuing due + // to result.RequestAfter + c.Queue.Forget(obj) + c.Queue.AddAfter(req, result.RequeueAfter) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc() + case result.Requeue: + c.Queue.AddRateLimited(req) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc() + default: + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.Queue.Forget(obj) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc() + } +} + +// GetLogger returns this controller's logger. +func (c *Controller) GetLogger() logr.Logger { + return c.LogConstructor(nil) +} + +// InjectFunc implement SetFields.Injector. +func (c *Controller) InjectFunc(f inject.Func) error { + c.SetFields = f + return nil +} + +// updateMetrics updates prometheus metrics within the controller. +func (c *Controller) updateMetrics(reconcileTime time.Duration) { + ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go new file mode 100644 index 000000000..baec66927 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // ReconcileTotal is a prometheus counter metrics which holds the total + // number of reconciliations per controller. It has two labels. controller label refers + // to the controller name and result label refers to the reconcile result i.e + // success, error, requeue, requeue_after. + ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_reconcile_total", + Help: "Total number of reconciliations per controller", + }, []string{"controller", "result"}) + + // ReconcileErrors is a prometheus counter metrics which holds the total + // number of errors from the Reconciler. + ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_reconcile_errors_total", + Help: "Total number of reconciliation errors per controller", + }, []string{"controller"}) + + // ReconcileTime is a prometheus metric which keeps track of the duration + // of reconciliations. 
+ ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "controller_runtime_reconcile_time_seconds", + Help: "Length of time per reconciliation per controller", + Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + }, []string{"controller"}) + + // WorkerCount is a prometheus metric which holds the number of + // concurrent reconciles per controller. + WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_max_concurrent_reconciles", + Help: "Maximum number of concurrent reconciles per controller", + }, []string{"controller"}) + + // ActiveWorkers is a prometheus metric which holds the number + // of active workers per controller. + ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_active_workers", + Help: "Number of currently used workers per controller", + }, []string{"controller"}) +) + +func init() { + metrics.Registry.MustRegister( + ReconcileTotal, + ReconcileErrors, + ReconcileTime, + WorkerCount, + ActiveWorkers, + // expose process metrics like CPU, Memory, file descriptor usage etc. + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + // expose Go runtime metrics like GC stats, memory stats etc. + collectors.NewGoCollector(), + ) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/httpserver/server.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/httpserver/server.go new file mode 100644 index 000000000..b5f91f18e --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/httpserver/server.go @@ -0,0 +1,16 @@ +package httpserver + +import ( + "net/http" + "time" +) + +// New returns a new server with sane defaults. 
+func New(handler http.Handler) *http.Server { + return &http.Server{ + Handler: handler, + MaxHeaderBytes: 1 << 20, + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, + } +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go new file mode 100644 index 000000000..d91a0ca50 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" + + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + // RuntimeLog is a base parent logger for use inside controller-runtime. + RuntimeLog logr.Logger +) + +func init() { + RuntimeLog = log.Log.WithName("controller-runtime") +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go new file mode 100644 index 000000000..7057f3dbe --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectutil + +import ( + "errors" + "fmt" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// FilterWithLabels returns a copy of the items in objs matching labelSel. +func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { + outItems := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outItems = append(outItems, obj.DeepCopyObject()) + } + return outItems, nil +} + +// IsAPINamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsAPINamespacedWithGVK(gvk, scheme, restmapper) +} + +// IsAPINamespacedWithGVK returns true if the object having the provided +// GVK is namespace scoped. 
+func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != apimeta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go new file mode 100644 index 000000000..46cc1714b --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go @@ -0,0 +1,176 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recorder + +import ( + "context" + "fmt" + "sync" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" +) + +// EventBroadcasterProducer makes an event broadcaster, returning +// whether or not the broadcaster should be stopped with the Provider, +// or not (e.g. if it's shared, it shouldn't be stopped with the Provider). 
+type EventBroadcasterProducer func() (caster record.EventBroadcaster, stopWithProvider bool) + +// Provider is a recorder.Provider that records events to the k8s API server +// and to a logr Logger. +type Provider struct { + lock sync.RWMutex + stopped bool + + // scheme to specify when creating a recorder + scheme *runtime.Scheme + // logger is the logger to use when logging diagnostic event info + logger logr.Logger + evtClient corev1client.EventInterface + makeBroadcaster EventBroadcasterProducer + + broadcasterOnce sync.Once + broadcaster record.EventBroadcaster + stopBroadcaster bool +} + +// NB(directxman12): this manually implements Stop instead of Being a runnable because we need to +// stop it *after* everything else shuts down, otherwise we'll cause panics as the leader election +// code finishes up and tries to continue emitting events. + +// Stop attempts to stop this provider, stopping the underlying broadcaster +// if the broadcaster asked to be stopped. It kinda tries to honor the given +// context, but the underlying broadcaster has an indefinite wait that doesn't +// return until all queued events are flushed, so this may end up just returning +// before the underlying wait has finished instead of cancelling the wait. +// This is Very Frustrating™. +func (p *Provider) Stop(shutdownCtx context.Context) { + doneCh := make(chan struct{}) + + go func() { + // technically, this could start the broadcaster, but practically, it's + // almost certainly already been started (e.g. by leader election). We + // need to invoke this to ensure that we don't inadvertently race with + // an invocation of getBroadcaster. + broadcaster := p.getBroadcaster() + if p.stopBroadcaster { + p.lock.Lock() + broadcaster.Shutdown() + p.stopped = true + p.lock.Unlock() + } + close(doneCh) + }() + + select { + case <-shutdownCtx.Done(): + case <-doneCh: + } +} + +// getBroadcaster ensures that a broadcaster is started for this +// provider, and returns it. It's threadsafe. 
+func (p *Provider) getBroadcaster() record.EventBroadcaster { + // NB(directxman12): this can technically still leak if something calls + // "getBroadcaster" (i.e. Emits an Event) but never calls Start, but if we + // create the broadcaster in start, we could race with other things that + // are started at the same time & want to emit events. The alternative is + // silently swallowing events and more locking, but that seems suboptimal. + + p.broadcasterOnce.Do(func() { + broadcaster, stop := p.makeBroadcaster() + broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: p.evtClient}) + broadcaster.StartEventWatcher( + func(e *corev1.Event) { + p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message) + }) + p.broadcaster = broadcaster + p.stopBroadcaster = stop + }) + + return p.broadcaster +} + +// NewProvider create a new Provider instance. +func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { + corev1Client, err := corev1client.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to init client: %w", err) + } + + p := &Provider{scheme: scheme, logger: logger, makeBroadcaster: makeBroadcaster, evtClient: corev1Client.Events("")} + return p, nil +} + +// GetEventRecorderFor returns an event recorder that broadcasts to this provider's +// broadcaster. All events will be associated with a component of the given name. +func (p *Provider) GetEventRecorderFor(name string) record.EventRecorder { + return &lazyRecorder{ + prov: p, + name: name, + } +} + +// lazyRecorder is a recorder that doesn't actually instantiate any underlying +// recorder until the first event is emitted. +type lazyRecorder struct { + prov *Provider + name string + + recOnce sync.Once + rec record.EventRecorder +} + +// ensureRecording ensures that a concrete recorder is populated for this recorder. 
+func (l *lazyRecorder) ensureRecording() { + l.recOnce.Do(func() { + broadcaster := l.prov.getBroadcaster() + l.rec = broadcaster.NewRecorder(l.prov.scheme, corev1.EventSource{Component: l.name}) + }) +} + +func (l *lazyRecorder) Event(object runtime.Object, eventtype, reason, message string) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.Event(object, eventtype, reason, message) + } + l.prov.lock.RUnlock() +} +func (l *lazyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.Eventf(object, eventtype, reason, messageFmt, args...) + } + l.prov.lock.RUnlock() +} +func (l *lazyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...) + } + l.prov.lock.RUnlock() +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go new file mode 100644 index 000000000..37a9aefab --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Package leaderelection contains a constructor for a leader election resource lock. +This is used to ensure that multiple copies of a controller manager can be run with +only one active set of controllers, for active-passive HA. + +It uses built-in Kubernetes leader election APIs. +*/ +package leaderelection diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go new file mode 100644 index 000000000..ee4fcf4cb --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "errors" + "fmt" + "os" + + "k8s.io/apimachinery/pkg/util/uuid" + coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection/resourcelock" + + "sigs.k8s.io/controller-runtime/pkg/recorder" +) + +const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + +// Options provides the required configuration to create a new resource lock. +type Options struct { + // LeaderElection determines whether or not to use leader election when + // starting the manager. 
+ LeaderElection bool + + // LeaderElectionResourceLock determines which resource lock to use for leader election, + // defaults to "leases". + LeaderElectionResourceLock string + + // LeaderElectionNamespace determines the namespace in which the leader + // election resource will be created. + LeaderElectionNamespace string + + // LeaderElectionID determines the name of the resource that leader election + // will use for holding the leader lock. + LeaderElectionID string +} + +// NewResourceLock creates a new resource lock for use in a leader election loop. +func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) { + if !options.LeaderElection { + return nil, nil + } + + // Default resource lock to "leases". The previous default (from v0.7.0 to v0.11.x) was configmapsleases, which was + // used to migrate from configmaps to leases. Since the default was "configmapsleases" for over a year, spanning + // five minor releases, any actively maintained operators are very likely to have a released version that uses + // "configmapsleases". Therefore defaulting to "leases" should be safe. 
+ if options.LeaderElectionResourceLock == "" { + options.LeaderElectionResourceLock = resourcelock.LeasesResourceLock + } + + // LeaderElectionID must be provided to prevent clashes + if options.LeaderElectionID == "" { + return nil, errors.New("LeaderElectionID must be configured") + } + + // Default the namespace (if running in cluster) + if options.LeaderElectionNamespace == "" { + var err error + options.LeaderElectionNamespace, err = getInClusterNamespace() + if err != nil { + return nil, fmt.Errorf("unable to find leader election namespace: %w", err) + } + } + + // Leader id, needs to be unique + id, err := os.Hostname() + if err != nil { + return nil, err + } + id = id + "_" + string(uuid.NewUUID()) + + // Construct clients for leader election + rest.AddUserAgent(config, "leader-election") + corev1Client, err := corev1client.NewForConfig(config) + if err != nil { + return nil, err + } + + coordinationClient, err := coordinationv1client.NewForConfig(config) + if err != nil { + return nil, err + } + + return resourcelock.New(options.LeaderElectionResourceLock, + options.LeaderElectionNamespace, + options.LeaderElectionID, + corev1Client, + coordinationClient, + resourcelock.ResourceLockConfig{ + Identity: id, + EventRecorder: recorderProvider.GetEventRecorderFor(id), + }) +} + +func getInClusterNamespace() (string, error) { + // Check whether the namespace file exists. + // If not, we are not running in cluster so can't guess the namespace. 
+ if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) { + return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace") + } else if err != nil { + return "", fmt.Errorf("error checking namespace file: %w", err) + } + + // Load the namespace file and return its content + namespace, err := os.ReadFile(inClusterNamespacePath) + if err != nil { + return "", fmt.Errorf("error reading namespace file: %w", err) + } + return string(namespace), nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go new file mode 100644 index 000000000..6fca63063 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -0,0 +1,188 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// loggerPromise knows how to populate a concrete logr.Logger +// with options, given an actual base logger later on down the line. 
+type loggerPromise struct { + logger *DelegatingLogSink + childPromises []*loggerPromise + promisesLock sync.Mutex + + name *string + tags []interface{} +} + +func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { + res := &loggerPromise{ + logger: l, + name: &name, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// WithValues provides a new Logger with the tags appended. +func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { + res := &loggerPromise{ + logger: l, + tags: tags, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// Fulfill instantiates the Logger with the provided logger. +func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { + sink := parentLogSink + if p.name != nil { + sink = sink.WithName(*p.name) + } + + if p.tags != nil { + sink = sink.WithValues(p.tags...) + } + + p.logger.lock.Lock() + p.logger.logger = sink + p.logger.promise = nil + p.logger.lock.Unlock() + + for _, childPromise := range p.childPromises { + childPromise.Fulfill(sink) + } +} + +// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// If the underlying promise is not nil, it registers calls to sub-loggers with +// the logging factory to be populated later, and returns a new delegating +// logger. It expects to have *some* logr.Logger set at all times (generally +// a no-op logger before the promises are fulfilled). +type DelegatingLogSink struct { + lock sync.RWMutex + logger logr.LogSink + promise *loggerPromise + info logr.RuntimeInfo +} + +// Init implements logr.LogSink. +func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { + l.lock.Lock() + defer l.lock.Unlock() + l.info = info +} + +// Enabled tests whether this Logger is enabled. 
For example, commandline +// flags might be used to set the logging verbosity and disable some info +// logs. +func (l *DelegatingLogSink) Enabled(level int) bool { + l.lock.RLock() + defer l.lock.RUnlock() + return l.logger.Enabled(level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to +// the log line. The key/value pairs can then be used to add additional +// variable information. The key/value pairs should alternate string +// keys and arbitrary values. +func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Info(level, msg, keysAndValues...) +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to calling Info with the "error" named value, but may +// have unique behavior, and should be preferred for logging errors (see the +// package documentations for more information). +// +// The msg field should be used to add context to any underlying error, +// while the err field should be used to attach the actual error that +// triggered this log line, if present. +func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Error(err, msg, keysAndValues...) +} + +// WithName provides a new Logger with the name appended. +func (l *DelegatingLogSink) WithName(name string) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + return l.logger.WithName(name) + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithName(res, name) + res.promise = promise + + return res +} + +// WithValues provides a new Logger with the tags appended. 
+func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + return l.logger.WithValues(tags...) + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithValues(res, tags...) + res.promise = promise + + return res +} + +// Fulfill switches the logger over to use the actual logger +// provided, instead of the temporary initial one, if this method +// has not been previously called. +func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { + if l.promise != nil { + l.promise.Fulfill(actual) + } +} + +// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// the given logger before its promise is fulfilled. +func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { + l := &DelegatingLogSink{ + logger: initial, + promise: &loggerPromise{promisesLock: sync.Mutex{}}, + } + l.promise.logger = l + return l +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go new file mode 100644 index 000000000..3965769c3 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log contains utilities for fetching a new logger +// when one is not already available. 
+// +// The Log Handle +// +// This package contains a root logr.Logger Log. It may be used to +// get a handle to whatever the root logging implementation is. By +// default, no implementation exists, and the handle returns "promises" +// to loggers. When the implementation is set using SetLogger, these +// "promises" will be converted over to real loggers. +// +// Logr +// +// All logging in controller-runtime is structured, using a set of interfaces +// defined by a package called logr +// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides +// helpers for setting up logr backed by Zap (go.uber.org/zap). +package log + +import ( + "context" + "sync" + "time" + + "github.com/go-logr/logr" +) + +// SetLogger sets a concrete logging implementation for all deferred Loggers. +func SetLogger(l logr.Logger) { + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + + loggerWasSet = true + dlog.Fulfill(l.GetSink()) +} + +// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries +// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory +// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. +// +// We need to keep the DelegatingLogSink because we have various inits() that get a logger from +// here. They will always get executed before any code that imports controller-runtime +// has a chance to run and hence to set an actual logger. +func init() { + // Init is blocking, so start a new goroutine + go func() { + time.Sleep(30 * time.Second) + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + if !loggerWasSet { + dlog.Fulfill(NullLogSink{}) + } + }() +} + +var ( + loggerWasSetLock sync.Mutex + loggerWasSet bool +) + +// Log is the base logger used by kubebuilder. It delegates +// to another logr.Logger. You *must* call SetLogger to +// get any actual logging. 
If SetLogger is not called within +// the first 30 seconds of a binaries lifetime, it will get +// set to a NullLogSink. +var ( + dlog = NewDelegatingLogSink(NullLogSink{}) + Log = logr.New(dlog) +) + +// FromContext returns a logger with predefined values from a context.Context. +func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger { + log := Log + if ctx != nil { + if logger, err := logr.FromContext(ctx); err == nil { + log = logger + } + } + return log.WithValues(keysAndValues...) +} + +// IntoContext takes a context and sets the logger as one of its values. +// Use FromContext function to retrieve the logger. +func IntoContext(ctx context.Context, log logr.Logger) context.Context { + return logr.NewContext(ctx, log) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go new file mode 100644 index 000000000..f3e81074f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" +) + +// NB: this is the same as the null logger logr/testing, +// but avoids accidentally adding the testing flags to +// all binaries. + +// NullLogSink is a logr.Logger that does nothing. +type NullLogSink struct{} + +var _ logr.LogSink = NullLogSink{} + +// Init implements logr.LogSink. 
+func (log NullLogSink) Init(logr.RuntimeInfo) { +} + +// Info implements logr.InfoLogger. +func (NullLogSink) Info(_ int, _ string, _ ...interface{}) { + // Do nothing. +} + +// Enabled implements logr.InfoLogger. +func (NullLogSink) Enabled(level int) bool { + return false +} + +// Error implements logr.Logger. +func (NullLogSink) Error(_ error, _ string, _ ...interface{}) { + // Do nothing. +} + +// WithName implements logr.Logger. +func (log NullLogSink) WithName(_ string) logr.LogSink { + return log +} + +// WithValues implements logr.Logger. +func (log NullLogSink) WithValues(_ ...interface{}) logr.LogSink { + return log +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go new file mode 100644 index 000000000..e9522632d --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// KubeAPIWarningLoggerOptions controls the behavior +// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger(). +type KubeAPIWarningLoggerOptions struct { + // Deduplicate indicates a given warning message should only be written once. + // Setting this to true in a long-running process handling many warnings can + // result in increased memory use. 
+ Deduplicate bool +} + +// KubeAPIWarningLogger is a wrapper around +// a provided logr.Logger that implements the +// rest.WarningHandler interface. +type KubeAPIWarningLogger struct { + // logger is used to log responses with the warning header + logger logr.Logger + // opts contain options controlling warning output + opts KubeAPIWarningLoggerOptions + // writtenLock gurads written + writtenLock sync.Mutex + // used to keep track of already logged messages + // and help in de-duplication. + written map[string]struct{} +} + +// HandleWarningHeader handles logging for responses from API server that are +// warnings with code being 299 and uses a logr.Logger for its logging purposes. +func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) { + if code != 299 || len(message) == 0 { + return + } + + if l.opts.Deduplicate { + l.writtenLock.Lock() + defer l.writtenLock.Unlock() + + if _, alreadyLogged := l.written[message]; alreadyLogged { + return + } + l.written[message] = struct{}{} + } + l.logger.Info(message) +} + +// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandler that logs warnings +// with code = 299 to the provided logr.Logger. +func NewKubeAPIWarningLogger(l logr.Logger, opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger { + h := &KubeAPIWarningLogger{logger: l, opts: opts} + if opts.Deduplicate { + h.written = map[string]struct{}{} + } + return h +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go new file mode 100644 index 000000000..f2976c7f7 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package manager is required to create Controllers and provides shared dependencies such as clients, caches, schemes, +etc. Controllers must be started by calling Manager.Start. +*/ +package manager diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go new file mode 100644 index 000000000..5b22c628f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -0,0 +1,652 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package manager + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/go-logr/logr" + "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +const ( + // Values taken from: https://github.com/kubernetes/component-base/blob/master/config/v1alpha1/defaults.go + defaultLeaseDuration = 15 * time.Second + defaultRenewDeadline = 10 * time.Second + defaultRetryPeriod = 2 * time.Second + defaultGracefulShutdownPeriod = 30 * time.Second + + defaultReadinessEndpoint = "/readyz" + defaultLivenessEndpoint = "/healthz" + defaultMetricsEndpoint = "/metrics" +) + +var _ Runnable = &controllerManager{} + +type controllerManager struct { + sync.Mutex + started bool + + stopProcedureEngaged *int64 + errChan chan error + runnables *runnables + + // cluster holds a variety of methods to interact with a cluster. Required. + cluster cluster.Cluster + + // recorderProvider is used to generate event recorders that will be injected into Controllers + // (and EventHandlers, Sources and Predicates). 
+ recorderProvider *intrec.Provider + + // resourceLock forms the basis for leader election + resourceLock resourcelock.Interface + + // leaderElectionReleaseOnCancel defines if the manager should step back from the leader lease + // on shutdown + leaderElectionReleaseOnCancel bool + + // metricsListener is used to serve prometheus metrics + metricsListener net.Listener + + // metricsExtraHandlers contains extra handlers to register on http server that serves metrics. + metricsExtraHandlers map[string]http.Handler + + // healthProbeListener is used to serve liveness probe + healthProbeListener net.Listener + + // Readiness probe endpoint name + readinessEndpointName string + + // Liveness probe endpoint name + livenessEndpointName string + + // Readyz probe handler + readyzHandler *healthz.Handler + + // Healthz probe handler + healthzHandler *healthz.Handler + + // controllerOptions are the global controller options. + controllerOptions v1alpha1.ControllerConfigurationSpec + + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. + logger logr.Logger + + // leaderElectionStopped is an internal channel used to signal the stopping procedure that the + // LeaderElection.Run(...) function has returned and the shutdown can proceed. + leaderElectionStopped chan struct{} + + // leaderElectionCancel is used to cancel the leader election. It is distinct from internalStopper, + // because for safety reasons we need to os.Exit() when we lose the leader election, meaning that + // it must be deferred until after gracefulShutdown is done. + leaderElectionCancel context.CancelFunc + + // elected is closed when this manager becomes the leader of a group of + // managers, either because it won a leader election or because no leader + // election was configured. + elected chan struct{} + + // port is the port that the webhook server serves at. + port int + // host is the hostname that the webhook server binds to. 
+ host string + // CertDir is the directory that contains the server key and certificate. + // if not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs + certDir string + + webhookServer *webhook.Server + // webhookServerOnce will be called in GetWebhookServer() to optionally initialize + // webhookServer if unset, and Add() it to controllerManager. + webhookServerOnce sync.Once + + // leaseDuration is the duration that non-leader candidates will + // wait to force acquire leadership. + leaseDuration time.Duration + // renewDeadline is the duration that the acting controlplane will retry + // refreshing leadership before giving up. + renewDeadline time.Duration + // retryPeriod is the duration the LeaderElector clients should wait + // between tries of actions. + retryPeriod time.Duration + + // gracefulShutdownTimeout is the duration given to runnable to stop + // before the manager actually returns on stop. + gracefulShutdownTimeout time.Duration + + // onStoppedLeading is callled when the leader election lease is lost. + // It can be overridden for tests. + onStoppedLeading func() + + // shutdownCtx is the context that can be used during shutdown. It will be cancelled + // after the gracefulShutdownTimeout ended. It must not be accessed before internalStop + // is closed because it will be nil. + shutdownCtx context.Context + + internalCtx context.Context + internalCancel context.CancelFunc + + // internalProceduresStop channel is used internally to the manager when coordinating + // the proper shutdown of servers. This channel is also used for dependency injection. + internalProceduresStop chan struct{} +} + +type hasCache interface { + Runnable + GetCache() cache.Cache +} + +// Add sets dependencies on i, and adds it to the list of Runnables to start. 
+func (cm *controllerManager) Add(r Runnable) error { + cm.Lock() + defer cm.Unlock() + return cm.add(r) +} + +func (cm *controllerManager) add(r Runnable) error { + // Set dependencies on the object + if err := cm.SetFields(r); err != nil { + return err + } + return cm.runnables.Add(r) +} + +// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. +func (cm *controllerManager) SetFields(i interface{}) error { + if err := cm.cluster.SetFields(i); err != nil { + return err + } + if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { + return err + } + if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil { + return err + } + if _, err := inject.LoggerInto(cm.logger, i); err != nil { + return err + } + + return nil +} + +// AddMetricsExtraHandler adds extra handler served on path to the http server that serves metrics. +func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error { + cm.Lock() + defer cm.Unlock() + + if cm.started { + return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created") + } + + if path == defaultMetricsEndpoint { + return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + } + + if _, found := cm.metricsExtraHandlers[path]; found { + return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) + } + + cm.metricsExtraHandlers[path] = handler + cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path) + return nil +} + +// AddHealthzCheck allows you to add Healthz checker. 
+func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error {
+	cm.Lock()
+	defer cm.Unlock()
+
+	if cm.started { // serveHealthProbes builds the mux once; checks must be added before Start
+		return fmt.Errorf("unable to add new checker because healthz endpoint has already been created")
+	}
+
+	if cm.healthzHandler == nil { // lazily allocate the handler on first check
+		cm.healthzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}}
+	}
+
+	cm.healthzHandler.Checks[name] = check
+	return nil
+}
+
+// AddReadyzCheck allows you to add Readyz checker.
+func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error {
+	cm.Lock()
+	defer cm.Unlock()
+
+	if cm.started {
+		return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") // NOTE(review): message says "healthz" — looks copy-pasted from AddHealthzCheck; fix upstream, not in vendor
+	}
+
+	if cm.readyzHandler == nil {
+		cm.readyzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}}
+	}
+
+	cm.readyzHandler.Checks[name] = check
+	return nil
+}
+
+func (cm *controllerManager) GetConfig() *rest.Config {
+	return cm.cluster.GetConfig()
+}
+
+func (cm *controllerManager) GetClient() client.Client {
+	return cm.cluster.GetClient()
+}
+
+func (cm *controllerManager) GetScheme() *runtime.Scheme {
+	return cm.cluster.GetScheme()
+}
+
+func (cm *controllerManager) GetFieldIndexer() client.FieldIndexer {
+	return cm.cluster.GetFieldIndexer()
+}
+
+func (cm *controllerManager) GetCache() cache.Cache {
+	return cm.cluster.GetCache()
+}
+
+func (cm *controllerManager) GetEventRecorderFor(name string) record.EventRecorder {
+	return cm.cluster.GetEventRecorderFor(name)
+}
+
+func (cm *controllerManager) GetRESTMapper() meta.RESTMapper {
+	return cm.cluster.GetRESTMapper()
+}
+
+func (cm *controllerManager) GetAPIReader() client.Reader {
+	return cm.cluster.GetAPIReader()
+}
+
+func (cm *controllerManager) GetWebhookServer() *webhook.Server {
+	cm.webhookServerOnce.Do(func() { // lazily construct and register the server exactly once
+		if cm.webhookServer == nil {
+			cm.webhookServer = &webhook.Server{
+				Port:    cm.port,
+				Host:    cm.host,
+				CertDir: cm.certDir,
+			}
+		}
+		if err := cm.Add(cm.webhookServer); err != nil { // Add can only fail on injection/duplicate-add; treated as programmer error
+			panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err))
+		}
+	})
+	return cm.webhookServer
+}
+
+func (cm *controllerManager) GetLogger() logr.Logger {
+	return cm.logger
+}
+
+func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec {
+	return cm.controllerOptions
+}
+
+func (cm *controllerManager) serveMetrics() {
+	handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{
+		ErrorHandling: promhttp.HTTPErrorOnError,
+	})
+	// TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics
+	mux := http.NewServeMux()
+	mux.Handle(defaultMetricsEndpoint, handler)
+	for path, extraHandler := range cm.metricsExtraHandlers { // handlers registered via AddMetricsExtraHandler before Start
+		mux.Handle(path, extraHandler)
+	}
+
+	server := httpserver.New(mux)
+	go cm.httpServe("metrics", cm.logger.WithValues("path", defaultMetricsEndpoint), server, cm.metricsListener)
+}
+
+func (cm *controllerManager) serveHealthProbes() {
+	mux := http.NewServeMux()
+	server := httpserver.New(mux)
+
+	if cm.readyzHandler != nil {
+		mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
+		// Append '/' suffix to handle subpaths
+		mux.Handle(cm.readinessEndpointName+"/", http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
+	}
+	if cm.healthzHandler != nil {
+		mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
+		// Append '/' suffix to handle subpaths
+		mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
+	}
+
+	go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener)
+}
+
+func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) {
+	log = log.WithValues("kind", kind, "addr", ln.Addr())
+
+	go func() {
+		log.Info("Starting server")
+		if err := server.Serve(ln); err != nil {
+			if errors.Is(err, http.ErrServerClosed) { // normal Shutdown path, not an error
+				return
+			}
+			if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 {
+				// There might be cases where connections are still open and we try to shutdown
+				// but not having enough time to close the connection causes an error in Serve
+				//
+				// In that case we want to avoid returning an error to the main error channel.
+				log.Error(err, "error on Serve after stop has been engaged")
+				return
+			}
+			cm.errChan <- err
+		}
+	}()
+
+	// Shutdown the server when stop is closed.
+	<-cm.internalProceduresStop
+	if err := server.Shutdown(cm.shutdownCtx); err != nil {
+		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+			// Avoid logging context related errors.
+			return
+		}
+		if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 {
+			cm.logger.Error(err, "error on Shutdown after stop has been engaged")
+			return
+		}
+		cm.errChan <- err
+	}
+}
+
+// Start starts the manager and waits indefinitely.
+// There is only two ways to have start return:
+// An error has occurred during in one of the internal operations,
+// such as leader election, cache start, webhooks, and so on.
+// Or, the context is cancelled.
+func (cm *controllerManager) Start(ctx context.Context) (err error) {
+	cm.Lock()
+	if cm.started {
+		cm.Unlock()
+		return errors.New("manager already started")
+	}
+	var ready bool // flipped just before releasing the lock once startup has fully succeeded
+	defer func() {
+		// Only unlock the manager if we haven't reached
+		// the internal readiness condition.
+		if !ready {
+			cm.Unlock()
+		}
+	}()
+
+	// Initialize the internal context.
+	cm.internalCtx, cm.internalCancel = context.WithCancel(ctx)
+
+	// This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request
+	stopComplete := make(chan struct{})
+	defer close(stopComplete)
+	// This must be deferred after closing stopComplete, otherwise we deadlock.
+	defer func() {
+		// https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/gettyimages-459889618-1533579787.jpg
+		stopErr := cm.engageStopProcedure(stopComplete)
+		if stopErr != nil {
+			if err != nil {
+				// Utilerrors.Aggregate allows to use errors.Is for all contained errors
+				// whereas fmt.Errorf allows wrapping at most one error which means the
+				// other one can not be found anymore.
+				err = kerrors.NewAggregate([]error{err, stopErr})
+			} else {
+				err = stopErr
+			}
+		}
+	}()
+
+	// Add the cluster runnable.
+	if err := cm.add(cm.cluster); err != nil { // cm.add, not cm.Add: the manager lock is already held here
+		return fmt.Errorf("failed to add cluster to runnables: %w", err)
+	}
+
+	// Metrics should be served whether the controller is leader or not.
+	// (If we don't serve metrics for non-leaders, prometheus will still scrape
+	// the pod but will get a connection refused).
+	if cm.metricsListener != nil {
+		cm.serveMetrics()
+	}
+
+	// Serve health probes.
+	if cm.healthProbeListener != nil {
+		cm.serveHealthProbes()
+	}
+
+	// First start any webhook servers, which includes conversion, validation, and defaulting
+	// webhooks that are registered.
+	//
+	// WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition
+	// between conversion webhooks and the cache sync (usually initial list) which causes the webhooks
+	// to never start because no cache can be populated.
+	if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil {
+		if !errors.Is(err, wait.ErrWaitTimeout) {
+			return err
+		}
+	}
+
+	// Start and wait for caches.
+	if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil {
+		if !errors.Is(err, wait.ErrWaitTimeout) {
+			return err
+		}
+	}
+
+	// Start the non-leaderelection Runnables after the cache has synced.
+	if err := cm.runnables.Others.Start(cm.internalCtx); err != nil {
+		if !errors.Is(err, wait.ErrWaitTimeout) {
+			return err
+		}
+	}
+
+	// Start the leader election and all required runnables.
+	{
+		ctx, cancel := context.WithCancel(context.Background()) // detached from caller ctx; cancelled explicitly in engageStopProcedure
+		cm.leaderElectionCancel = cancel
+		go func() {
+			if cm.resourceLock != nil {
+				if err := cm.startLeaderElection(ctx); err != nil {
+					cm.errChan <- err
+				}
+			} else {
+				// Treat not having leader election enabled the same as being elected.
+				if err := cm.startLeaderElectionRunnables(); err != nil {
+					cm.errChan <- err
+				}
+				close(cm.elected)
+			}
+		}()
+	}
+
+	ready = true
+	cm.Unlock()
+	select {
+	case <-ctx.Done():
+		// We are done
+		return nil
+	case err := <-cm.errChan:
+		// Error starting or running a runnable
+		return err
+	}
+}
+
+// engageStopProcedure signals all runnables to stop, reads potential errors
+// from the errChan and waits for them to end. It must not be called more than once.
+func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) error {
+	if !atomic.CompareAndSwapInt64(cm.stopProcedureEngaged, 0, 1) { // CAS guards against double engagement
+		return errors.New("stop procedure already engaged")
+	}
+
+	// Populate the shutdown context, this operation MUST be done before
+	// closing the internalProceduresStop channel.
+	//
+	// The shutdown context immediately expires if the gracefulShutdownTimeout is not set.
+	var shutdownCancel context.CancelFunc
+	cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
+	defer shutdownCancel()
+
+	// Start draining the errors before acquiring the lock to make sure we don't deadlock
+	// if something that has the lock is blocked on trying to write into the unbuffered
+	// channel after something else already wrote into it.
+	var closeOnce sync.Once
+	go func() {
+		for {
+			// Closing in the for loop is required to avoid race conditions between
+			// the closure of all internal procedures and making sure to have a reader off the error channel.
+			closeOnce.Do(func() {
+				// Cancel the internal stop channel and wait for the procedures to stop and complete.
+				close(cm.internalProceduresStop)
+				cm.internalCancel()
+			})
+			select {
+			case err, ok := <-cm.errChan:
+				if ok {
+					cm.logger.Error(err, "error received after stop sequence was engaged")
+				}
+			case <-stopComplete:
+				return
+			}
+		}
+	}()
+
+	// We want to close this after the other runnables stop, because we don't
+	// want things like leader election to try and emit events on a closed
+	// channel
+	defer cm.recorderProvider.Stop(cm.shutdownCtx)
+	defer func() {
+		// Cancel leader election only after we waited. It will os.Exit() the app for safety.
+		if cm.resourceLock != nil {
+			// After asking the context to be cancelled, make sure
+			// we wait for the leader stopped channel to be closed, otherwise
+			// we might encounter race conditions between this code
+			// and the event recorder, which is used within leader election code.
+			cm.leaderElectionCancel()
+			<-cm.leaderElectionStopped
+		}
+	}()
+
+	go func() {
+		// First stop the non-leader election runnables.
+		cm.logger.Info("Stopping and waiting for non leader election runnables")
+		cm.runnables.Others.StopAndWait(cm.shutdownCtx)
+
+		// Stop all the leader election runnables, which includes reconcilers.
+		cm.logger.Info("Stopping and waiting for leader election runnables")
+		cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx)
+
+		// Stop the caches before the leader election runnables, this is an important
+		// step to make sure that we don't race with the reconcilers by receiving more events
+		// from the API servers and enqueueing them.
+		cm.logger.Info("Stopping and waiting for caches")
+		cm.runnables.Caches.StopAndWait(cm.shutdownCtx)
+
+		// Webhooks should come last, as they might be still serving some requests.
+		cm.logger.Info("Stopping and waiting for webhooks")
+		cm.runnables.Webhooks.StopAndWait(cm.shutdownCtx)
+
+		// Proceed to close the manager and overall shutdown context.
+		cm.logger.Info("Wait completed, proceeding to shutdown the manager")
+		shutdownCancel()
+	}()
+
+	<-cm.shutdownCtx.Done()
+	if err := cm.shutdownCtx.Err(); err != nil && !errors.Is(err, context.Canceled) {
+		if errors.Is(err, context.DeadlineExceeded) {
+			if cm.gracefulShutdownTimeout > 0 {
+				return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err)
+			}
+			return nil
+		}
+		// For any other error, return the error.
+		return err
+	}
+
+	return nil
+}
+
+func (cm *controllerManager) startLeaderElectionRunnables() error {
+	return cm.runnables.LeaderElection.Start(cm.internalCtx)
+}
+
+func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error) {
+	l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
+		Lock:          cm.resourceLock,
+		LeaseDuration: cm.leaseDuration,
+		RenewDeadline: cm.renewDeadline,
+		RetryPeriod:   cm.retryPeriod,
+		Callbacks: leaderelection.LeaderCallbacks{
+			OnStartedLeading: func(_ context.Context) {
+				if err := cm.startLeaderElectionRunnables(); err != nil {
+					cm.errChan <- err
+					return
+				}
+				close(cm.elected)
+			},
+			OnStoppedLeading: func() {
+				if cm.onStoppedLeading != nil {
+					cm.onStoppedLeading()
+				}
+				// Make sure graceful shutdown is skipped if we lost the leader lock without
+				// intending to.
+				cm.gracefulShutdownTimeout = time.Duration(0)
+				// Most implementations of leader election log.Fatal() here.
+				// Since Start is wrapped in log.Fatal when called, we can just return
+				// an error here which will cause the program to exit.
+				cm.errChan <- errors.New("leader election lost")
+			},
+		},
+		ReleaseOnCancel: cm.leaderElectionReleaseOnCancel,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Start the leader elector process
+	go func() {
+		l.Run(ctx)
+		<-ctx.Done()
+		close(cm.leaderElectionStopped) // engageStopProcedure blocks on this after cancelling ctx
+	}()
+	return nil
+}
+
+func (cm *controllerManager) Elected() <-chan struct{} {
+	return cm.elected
+}
diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
new file mode 100644
index 000000000..5f76b5a83
--- /dev/null
+++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
@@ -0,0 +1,620 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"reflect"
+	"time"
+
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/cluster"
+	"sigs.k8s.io/controller-runtime/pkg/config"
+	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
+	"sigs.k8s.io/controller-runtime/pkg/healthz"
+	intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
+	"sigs.k8s.io/controller-runtime/pkg/leaderelection"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/metrics"
+	"sigs.k8s.io/controller-runtime/pkg/recorder"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+)
+
+// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables.
+// A Manager is required to create Controllers.
+type Manager interface {
+	// Cluster holds a variety of methods to interact with a cluster.
+	cluster.Cluster
+
+	// Add will set requested dependencies on the component, and cause the component to be
+	// started when Start is called. Add will inject any dependencies for which the argument
+	// implements the inject interface - e.g. inject.Client.
+	// Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either
+	// non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled).
+	Add(Runnable) error
+
+	// Elected is closed when this manager is elected leader of a group of
+	// managers, either because it won a leader election or because no leader
+	// election was configured.
+	Elected() <-chan struct{}
+
+	// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
+	// Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints meant to be
+	// sensitive and shouldn't be exposed publicly.
+	// If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as
+	// Runnable to the manager via Add method.
+	AddMetricsExtraHandler(path string, handler http.Handler) error
+
+	// AddHealthzCheck allows you to add Healthz checker
+	AddHealthzCheck(name string, check healthz.Checker) error
+
+	// AddReadyzCheck allows you to add Readyz checker
+	AddReadyzCheck(name string, check healthz.Checker) error
+
+	// Start starts all registered Controllers and blocks until the context is cancelled.
+	// Returns an error if there is an error starting any controller.
+	//
+	// If LeaderElection is used, the binary must be exited immediately after this returns,
+	// otherwise components that need leader election might continue to run after the leader
+	// lock was lost.
+	Start(ctx context.Context) error
+
+	// GetWebhookServer returns a webhook.Server
+	GetWebhookServer() *webhook.Server
+
+	// GetLogger returns this manager's logger.
+	GetLogger() logr.Logger
+
+	// GetControllerOptions returns controller global configuration options.
+	GetControllerOptions() v1alpha1.ControllerConfigurationSpec
+}
+
+// Options are the arguments for creating a new Manager.
+type Options struct {
+	// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources.
+	// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better
+	// to pass your own scheme in. See the documentation in pkg/scheme for more information.
+	Scheme *runtime.Scheme
+
+	// MapperProvider provides the rest mapper used to map go types to Kubernetes APIs
+	MapperProvider func(c *rest.Config) (meta.RESTMapper, error)
+
+	// SyncPeriod determines the minimum frequency at which watched resources are
+	// reconciled. A lower period will correct entropy more quickly, but reduce
+	// responsiveness to change if there are many watched resources. Change this
+	// value only if you know what you are doing. Defaults to 10 hours if unset.
+	// there will a 10 percent jitter between the SyncPeriod of all controllers
+	// so that all controllers will not send list requests simultaneously.
+	//
+	// This applies to all controllers.
+	//
+	// A period sync happens for two reasons:
+	// 1. To insure against a bug in the controller that causes an object to not
+	// be requeued, when it otherwise should be requeued.
+	// 2. To insure against an unknown bug in controller-runtime, or its dependencies,
+	// that causes an object to not be requeued, when it otherwise should be
+	// requeued, or to be removed from the queue, when it otherwise should not
+	// be removed.
+	//
+	// If you want
+	// 1. to insure against missed watch events, or
+	// 2. to poll services that cannot be watched,
+	// then we recommend that, instead of changing the default period, the
+	// controller requeue, with a constant duration `t`, whenever the controller
+	// is "done" with an object, and would otherwise not requeue it, i.e., we
+	// recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`,
+	// instead of `reconcile.Result{}`.
+	SyncPeriod *time.Duration
+
+	// Logger is the logger that should be used by this manager.
+	// If none is set, it defaults to log.Log global logger.
+	Logger logr.Logger
+
+	// LeaderElection determines whether or not to use leader election when
+	// starting the manager.
+	LeaderElection bool
+
+	// LeaderElectionResourceLock determines which resource lock to use for leader election,
+	// defaults to "configmapsleases". Change this value only if you know what you are doing.
+	// Otherwise, users of your controller might end up with multiple running instances that
+	// each acquired leadership through different resource locks during upgrades and thus
+	// act on the same resources concurrently.
+	// If you want to migrate to the "leases" resource lock, you might do so by migrating to the
+	// respective multilock first ("configmapsleases" or "endpointsleases"), which will acquire a
+	// leader lock on both resources. After all your users have migrated to the multilock, you can
+	// go ahead and migrate to "leases". Please also keep in mind, that users might skip versions
+	// of your controller.
+	//
+	// Note: before controller-runtime version v0.7, the resource lock was set to "configmaps".
+	// Please keep this in mind, when planning a proper migration path for your controller.
+	LeaderElectionResourceLock string
+
+	// LeaderElectionNamespace determines the namespace in which the leader
+	// election resource will be created.
+	LeaderElectionNamespace string
+
+	// LeaderElectionID determines the name of the resource that leader election
+	// will use for holding the leader lock.
+	LeaderElectionID string
+
+	// LeaderElectionConfig can be specified to override the default configuration
+	// that is used to build the leader election client.
+	LeaderElectionConfig *rest.Config
+
+	// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
+	// when the Manager ends. This requires the binary to immediately end when the
+	// Manager is stopped, otherwise this setting is unsafe. Setting this significantly
+	// speeds up voluntary leader transitions as the new leader doesn't have to wait
+	// LeaseDuration time first.
+	LeaderElectionReleaseOnCancel bool
+
+	// LeaseDuration is the duration that non-leader candidates will
+	// wait to force acquire leadership. This is measured against time of
+	// last observed ack. Default is 15 seconds.
+	LeaseDuration *time.Duration
+	// RenewDeadline is the duration that the acting controlplane will retry
+	// refreshing leadership before giving up. Default is 10 seconds.
+	RenewDeadline *time.Duration
+	// RetryPeriod is the duration the LeaderElector clients should wait
+	// between tries of actions. Default is 2 seconds.
+	RetryPeriod *time.Duration
+
+	// Namespace, if specified, restricts the manager's cache to watch objects in
+	// the desired namespace. Defaults to all namespaces.
+	//
+	// Note: If a namespace is specified, controllers can still Watch for a
+	// cluster-scoped resource (e.g Node). For namespaced resources, the cache
+	// will only hold objects from the desired namespace.
+	Namespace string
+
+	// MetricsBindAddress is the TCP address that the controller should bind to
+	// for serving prometheus metrics.
+	// It can be set to "0" to disable the metrics serving.
+	MetricsBindAddress string
+
+	// HealthProbeBindAddress is the TCP address that the controller should bind to
+	// for serving health probes
+	HealthProbeBindAddress string
+
+	// Readiness probe endpoint name, defaults to "readyz"
+	ReadinessEndpointName string
+
+	// Liveness probe endpoint name, defaults to "healthz"
+	LivenessEndpointName string
+
+	// Port is the port that the webhook server serves at.
+	// It is used to set webhook.Server.Port if WebhookServer is not set.
+	Port int
+	// Host is the hostname that the webhook server binds to.
+	// It is used to set webhook.Server.Host if WebhookServer is not set.
+	Host string
+
+	// CertDir is the directory that contains the server key and certificate.
+	// If not set, webhook server would look up the server key and certificate in
+	// {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate
+	// must be named tls.key and tls.crt, respectively.
+	// It is used to set webhook.Server.CertDir if WebhookServer is not set.
+	CertDir string
+
+	// WebhookServer is an externally configured webhook.Server. By default,
+	// a Manager will create a default server using Port, Host, and CertDir;
+	// if this is set, the Manager will use this server instead.
+	WebhookServer *webhook.Server
+
+	// Functions to allow for a user to customize values that will be injected.
+
+	// NewCache is the function that will create the cache to be used
+	// by the manager. If not set this will use the default new cache function.
+	NewCache cache.NewCacheFunc
+
+	// NewClient is the func that creates the client to be used by the manager.
+	// If not set this will create the default DelegatingClient that will
+	// use the cache for reads and the client for writes.
+	NewClient cluster.NewClientFunc
+
+	// BaseContext is the function that provides Context values to Runnables
+	// managed by the Manager. If a BaseContext function isn't provided, Runnables
+	// will receive a new Background Context instead.
+	BaseContext BaseContextFunc
+
+	// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
+	// for the given objects.
+	ClientDisableCacheFor []client.Object
+
+	// DryRunClient specifies whether the client should be configured to enforce
+	// dryRun mode.
+	DryRunClient bool
+
+	// EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API
+	// Use this to customize the event correlator and spam filter
+	//
+	// Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers
+	// is shorter than the lifetime of your process.
+	EventBroadcaster record.EventBroadcaster
+
+	// GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop.
+	// To disable graceful shutdown, set to time.Duration(0)
+	// To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1)
+	// The graceful shutdown is skipped for safety reasons in case the leader election lease is lost.
+	GracefulShutdownTimeout *time.Duration
+
+	// Controller contains global configuration options for controllers
+	// registered within this manager.
+	// +optional
+	Controller v1alpha1.ControllerConfigurationSpec
+
+	// makeBroadcaster allows deferring the creation of the broadcaster to
+	// avoid leaking goroutines if we never call Start on this manager. It also
+	// returns whether or not this is a "owned" broadcaster, and as such should be
+	// stopped with the manager.
+	makeBroadcaster intrec.EventBroadcasterProducer
+
+	// Dependency injection for testing
+	newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) // defaulted to intrec.NewProvider in setOptionsDefaults
+	newResourceLock     func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error)                    // defaulted to leaderelection.NewResourceLock in setOptionsDefaults
+	newMetricsListener  func(addr string) (net.Listener, error)                                                                                                         // defaulted to metrics.NewListener in setOptionsDefaults
+	newHealthProbeListener func(addr string) (net.Listener, error)
+}
+
+// BaseContextFunc is a function used to provide a base Context to Runnables
+// managed by a Manager.
+type BaseContextFunc func() context.Context
+
+// Runnable allows a component to be started.
+// It's very important that Start blocks until
+// it's done running.
+type Runnable interface {
+	// Start starts running the component. The component will stop running
+	// when the context is closed. Start blocks until the context is closed or
+	// an error occurs.
+	Start(context.Context) error
+}
+
+// RunnableFunc implements Runnable using a function.
+// It's very important that the given function block
+// until it's done running.
+type RunnableFunc func(context.Context) error
+
+// Start implements Runnable.
+func (r RunnableFunc) Start(ctx context.Context) error {
+	return r(ctx)
+}
+
+// LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode.
+type LeaderElectionRunnable interface {
+	// NeedLeaderElection returns true if the Runnable needs to be run in the leader election mode.
+	// e.g. controllers need to be run in leader election mode, while webhook server doesn't.
+	NeedLeaderElection() bool
+}
+
+// New returns a new Manager for creating Controllers.
+func New(config *rest.Config, options Options) (Manager, error) {
+	// Set default values for options fields
+	options = setOptionsDefaults(options)
+
+	cluster, err := cluster.New(config, func(clusterOptions *cluster.Options) {
+		clusterOptions.Scheme = options.Scheme
+		clusterOptions.MapperProvider = options.MapperProvider
+		clusterOptions.Logger = options.Logger
+		clusterOptions.SyncPeriod = options.SyncPeriod
+		clusterOptions.Namespace = options.Namespace
+		clusterOptions.NewCache = options.NewCache
+		clusterOptions.NewClient = options.NewClient
+		clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor
+		clusterOptions.DryRunClient = options.DryRunClient
+		clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the recorder provider to inject event recorders for the components.
+	// TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific
+	// to the particular controller that it's being injected into, rather than a generic one like is here.
+	recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the resource lock to enable leader election
+	var leaderConfig *rest.Config
+	var leaderRecorderProvider *intrec.Provider
+
+	if options.LeaderElectionConfig == nil {
+		leaderConfig = rest.CopyConfig(config)
+		leaderRecorderProvider = recorderProvider
+	} else {
+		leaderConfig = rest.CopyConfig(options.LeaderElectionConfig)
+		leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	resourceLock, err := options.newResourceLock(leaderConfig, leaderRecorderProvider, leaderelection.Options{
+		LeaderElection:             options.LeaderElection,
+		LeaderElectionResourceLock: options.LeaderElectionResourceLock,
+		LeaderElectionID:           options.LeaderElectionID,
+		LeaderElectionNamespace:    options.LeaderElectionNamespace,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the metrics listener. This will throw an error if the metrics bind
+	// address is invalid or already in use.
+	metricsListener, err := options.newMetricsListener(options.MetricsBindAddress)
+	if err != nil {
+		return nil, err
+	}
+
+	// By default we have no extra endpoints to expose on metrics http server.
+	metricsExtraHandlers := make(map[string]http.Handler)
+
+	// Create health probes listener. This will throw an error if the bind
+	// address is invalid or already in use.
+	healthProbeListener, err := options.newHealthProbeListener(options.HealthProbeBindAddress)
+	if err != nil {
+		return nil, err
+	}
+
+	errChan := make(chan error)
+	runnables := newRunnables(options.BaseContext, errChan)
+
+	return &controllerManager{
+		stopProcedureEngaged:          pointer.Int64(0),
+		cluster:                       cluster,
+		runnables:                     runnables,
+		errChan:                       errChan,
+		recorderProvider:              recorderProvider,
+		resourceLock:                  resourceLock,
+		metricsListener:               metricsListener,
+		metricsExtraHandlers:          metricsExtraHandlers,
+		controllerOptions:             options.Controller,
+		logger:                        options.Logger,
+		elected:                       make(chan struct{}),
+		port:                          options.Port,
+		host:                          options.Host,
+		certDir:                       options.CertDir,
+		webhookServer:                 options.WebhookServer,
+		leaseDuration:                 *options.LeaseDuration, // non-nil: defaulted in setOptionsDefaults
+		renewDeadline:                 *options.RenewDeadline, // non-nil: defaulted in setOptionsDefaults
+		retryPeriod:                   *options.RetryPeriod,   // non-nil: defaulted in setOptionsDefaults
+		healthProbeListener:           healthProbeListener,
+		readinessEndpointName:         options.ReadinessEndpointName,
+		livenessEndpointName:          options.LivenessEndpointName,
+		gracefulShutdownTimeout:       *options.GracefulShutdownTimeout,
+		internalProceduresStop:        make(chan struct{}),
+		leaderElectionStopped:         make(chan struct{}),
+		leaderElectionReleaseOnCancel: options.LeaderElectionReleaseOnCancel,
+	}, nil
+}
+
+// AndFrom will use a supplied type and convert to Options
+// any options already set on Options will be ignored, this is used to allow
+// cli flags to override anything specified in the config file.
+func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) {
+	if inj, wantsScheme := loader.(inject.Scheme); wantsScheme {
+		err := inj.InjectScheme(o.Scheme)
+		if err != nil {
+			return o, err
+		}
+	}
+
+	newObj, err := loader.Complete()
+	if err != nil {
+		return o, err
+	}
+
+	o = o.setLeaderElectionConfig(newObj) // leader-election fields merged first; values already set on o win
+
+	if o.SyncPeriod == nil && newObj.SyncPeriod != nil {
+		o.SyncPeriod = &newObj.SyncPeriod.Duration
+	}
+
+	if o.Namespace == "" && newObj.CacheNamespace != "" {
+		o.Namespace = newObj.CacheNamespace
+	}
+
+	if o.MetricsBindAddress == "" && newObj.Metrics.BindAddress != "" {
+		o.MetricsBindAddress = newObj.Metrics.BindAddress
+	}
+
+	if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" {
+		o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress
+	}
+
+	if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" {
+		o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName
+	}
+
+	if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" {
+		o.LivenessEndpointName = newObj.Health.LivenessEndpointName
+	}
+
+	if o.Port == 0 && newObj.Webhook.Port != nil {
+		o.Port = *newObj.Webhook.Port
+	}
+
+	if o.Host == "" && newObj.Webhook.Host != "" {
+		o.Host = newObj.Webhook.Host
+	}
+
+	if o.CertDir == "" && newObj.Webhook.CertDir != "" {
+		o.CertDir = newObj.Webhook.CertDir
+	}
+
+	if newObj.Controller != nil {
+		if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil {
+			o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout
+		}
+
+		if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 {
+			o.Controller.GroupKindConcurrency = newObj.Controller.GroupKindConcurrency
+		}
+	}
+
+	return o, nil
+}
+
+// AndFromOrDie will use options.AndFrom() and will panic if there are errors.
+func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { + o, err := o.AndFrom(loader) + if err != nil { + panic(fmt.Sprintf("could not parse config file: %v", err)) + } + return o +} + +func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options { + if obj.LeaderElection == nil { + // The source does not have any configuration; noop + return o + } + + if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil { + o.LeaderElection = *obj.LeaderElection.LeaderElect + } + + if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" { + o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock + } + + if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" { + o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace + } + + if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" { + o.LeaderElectionID = obj.LeaderElection.ResourceName + } + + if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) { + o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration + } + + if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) { + o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration + } + + if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) { + o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration + } + + return o +} + +// defaultHealthProbeListener creates the default health probes listener bound to the given address. 
// defaultHealthProbeListener creates the default health probes listener bound to the given address.
func defaultHealthProbeListener(addr string) (net.Listener, error) {
	// An empty address or "0" disables the health probe endpoint entirely.
	if addr == "" || addr == "0" {
		return nil, nil
	}

	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, fmt.Errorf("error listening on %s: %w", addr, err)
	}
	return ln, nil
}

// defaultBaseContext is used as the BaseContext value in Options if one
// has not already been set.
func defaultBaseContext() context.Context {
	return context.Background()
}

// setOptionsDefaults set default values for Options fields.
// It fills in every unset constructor hook, duration, and endpoint name so
// the rest of the manager can use them without nil checks.
func setOptionsDefaults(options Options) Options {
	// Allow newResourceLock to be mocked
	if options.newResourceLock == nil {
		options.newResourceLock = leaderelection.NewResourceLock
	}

	// Allow newRecorderProvider to be mocked
	if options.newRecorderProvider == nil {
		options.newRecorderProvider = intrec.NewProvider
	}

	// This is duplicated with pkg/cluster, we need it here
	// for the leader election and there to provide the user with
	// an EventBroadcaster
	if options.EventBroadcaster == nil {
		// defer initialization to avoid leaking by default
		options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
			return record.NewBroadcaster(), true
		}
	} else {
		// The second return value reports whether the manager owns (and must
		// shut down) the broadcaster; a user-supplied one is not owned.
		options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
			return options.EventBroadcaster, false
		}
	}

	if options.newMetricsListener == nil {
		options.newMetricsListener = metrics.NewListener
	}
	// Copy the package-level defaults into locals so we can take their address.
	leaseDuration, renewDeadline, retryPeriod := defaultLeaseDuration, defaultRenewDeadline, defaultRetryPeriod
	if options.LeaseDuration == nil {
		options.LeaseDuration = &leaseDuration
	}

	if options.RenewDeadline == nil {
		options.RenewDeadline = &renewDeadline
	}

	if options.RetryPeriod == nil {
		options.RetryPeriod = &retryPeriod
	}

	if options.ReadinessEndpointName == "" {
		options.ReadinessEndpointName = defaultReadinessEndpoint
	}

	if options.LivenessEndpointName == "" {
		options.LivenessEndpointName = defaultLivenessEndpoint
	}

	if options.newHealthProbeListener == nil {
		options.newHealthProbeListener = defaultHealthProbeListener
	}

	if options.GracefulShutdownTimeout == nil {
		gracefulShutdownTimeout := defaultGracefulShutdownPeriod
		options.GracefulShutdownTimeout = &gracefulShutdownTimeout
	}

	if options.Logger.GetSink() == nil {
		options.Logger = log.Log
	}

	if options.BaseContext == nil {
		options.BaseContext = defaultBaseContext
	}

	return options
}

var (
	// errRunnableGroupStopped is returned by Add once shutdown has begun.
	errRunnableGroupStopped = errors.New("can't accept new runnable as stop procedure is already engaged")
)

// readyRunnable encapsulates a runnable with
// a ready check.
type readyRunnable struct {
	Runnable
	Check runnableCheck
	// signalReady is set for runnables queued before Start so that Start can
	// wait for their readiness via startReadyCh.
	signalReady bool
}

// runnableCheck can be passed to Add() to let the runnable group determine that a
// runnable is ready. A runnable check should block until a runnable is ready,
// if the returned result is false, the runnable is considered not ready and failed.
type runnableCheck func(ctx context.Context) bool

// runnables handles all the runnables for a manager by grouping them accordingly to their
// type (webhooks, caches etc.).
type runnables struct {
	Webhooks       *runnableGroup
	Caches         *runnableGroup
	LeaderElection *runnableGroup
	Others         *runnableGroup
}

// newRunnables creates a new runnables object.
func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables {
	return &runnables{
		Webhooks:       newRunnableGroup(baseContext, errChan),
		Caches:         newRunnableGroup(baseContext, errChan),
		LeaderElection: newRunnableGroup(baseContext, errChan),
		Others:         newRunnableGroup(baseContext, errChan),
	}
}

// Add adds a runnable to closest group of runnable that they belong to.
//
// Add should be able to be called before and after Start, but not after StopAndWait.
// Add should return an error when called during StopAndWait.
// The runnables added before Start are started when Start is called.
// The runnables added after Start are started directly.
func (r *runnables) Add(fn Runnable) error {
	switch runnable := fn.(type) {
	case hasCache:
		// Caches get a readiness check that blocks until the cache has synced.
		return r.Caches.Add(fn, func(ctx context.Context) bool {
			return runnable.GetCache().WaitForCacheSync(ctx)
		})
	case *webhook.Server:
		return r.Webhooks.Add(fn, nil)
	case LeaderElectionRunnable:
		if !runnable.NeedLeaderElection() {
			return r.Others.Add(fn, nil)
		}
		return r.LeaderElection.Add(fn, nil)
	default:
		// Anything else is conservatively gated behind leader election.
		return r.LeaderElection.Add(fn, nil)
	}
}

// runnableGroup manages a group of runnables that are
// meant to be running together until StopAndWait is called.
//
// Runnables can be added to a group after the group has started
// but not after it's stopped or while shutting down.
type runnableGroup struct {
	ctx    context.Context
	cancel context.CancelFunc

	// start guards started, startQueue and the Start handshake.
	start        sync.Mutex
	startOnce    sync.Once
	started      bool
	startQueue   []*readyRunnable
	startReadyCh chan *readyRunnable

	// stop guards stopped; taken as a read lock on the Add/reconcile fast path.
	stop     sync.RWMutex
	stopOnce sync.Once
	stopped  bool

	// errChan is the error channel passed by the caller
	// when the group is created.
	// All errors are forwarded to this channel once they occur.
	errChan chan error

	// ch is the internal channel where the runnables are read off from.
	ch chan *readyRunnable

	// wg is an internal sync.WaitGroup that allows us to properly stop
	// and wait for all the runnables to finish before returning.
	wg *sync.WaitGroup
}

func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnableGroup {
	r := &runnableGroup{
		startReadyCh: make(chan *readyRunnable),
		errChan:      errChan,
		ch:           make(chan *readyRunnable),
		wg:           new(sync.WaitGroup),
	}

	// The group's context derives from the caller-provided base context and is
	// canceled by StopAndWait.
	r.ctx, r.cancel = context.WithCancel(baseContext())
	return r
}

// Started returns true if the group has started.
func (r *runnableGroup) Started() bool {
	r.start.Lock()
	defer r.start.Unlock()
	return r.started
}

// Start starts the group and waits for all
// initially registered runnables to start.
// It can only be called once, subsequent calls have no effect.
func (r *runnableGroup) Start(ctx context.Context) error {
	var retErr error

	r.startOnce.Do(func() {
		defer close(r.startReadyCh)

		// Start the internal reconciler.
		go r.reconcile()

		// Start the group and queue up all
		// the runnables that were added prior.
		r.start.Lock()
		r.started = true
		for _, rn := range r.startQueue {
			rn.signalReady = true
			r.ch <- rn
		}
		r.start.Unlock()

		// If we don't have any queue, return.
		if len(r.startQueue) == 0 {
			return
		}

		// Wait for all runnables to signal.
		for {
			select {
			case <-ctx.Done():
				// Cancellation is a normal shutdown, not an error.
				if err := ctx.Err(); !errors.Is(err, context.Canceled) {
					retErr = err
				}
			case rn := <-r.startReadyCh:
				for i, existing := range r.startQueue {
					if existing == rn {
						// Remove the item from the start queue.
						r.startQueue = append(r.startQueue[:i], r.startQueue[i+1:]...)
						break
					}
				}
				// We're done waiting if the queue is empty, return.
				if len(r.startQueue) == 0 {
					return
				}
			}
		}
	})

	return retErr
}

// reconcile is our main entrypoint for every runnable added
// to this group.
// Its primary job is to read off the internal channel
// and schedule runnables while tracking their state.
func (r *runnableGroup) reconcile() {
	for runnable := range r.ch {
		// Handle stop.
		// If the shutdown has been called we want to avoid
		// adding new goroutines to the WaitGroup because Wait()
		// panics if Add() is called after it.
		{
			r.stop.RLock()
			if r.stopped {
				// Drop any runnables if we're stopped.
				r.errChan <- errRunnableGroupStopped
				r.stop.RUnlock()
				continue
			}

			// Why is this here?
			// When StopAndWait is called, if a runnable is in the process
			// of being added, we could end up in a situation where
			// the WaitGroup is incremented while StopAndWait has called Wait(),
			// which would result in a panic.
			r.wg.Add(1)
			r.stop.RUnlock()
		}

		// Start the runnable.
		go func(rn *readyRunnable) {
			// Run the readiness check concurrently; signal Start only for
			// runnables that were queued before Start (signalReady == true).
			go func() {
				if rn.Check(r.ctx) {
					if rn.signalReady {
						r.startReadyCh <- rn
					}
				}
			}()

			// If we return, the runnable ended cleanly
			// or returned an error to the channel.
			//
			// We should always decrement the WaitGroup here.
			defer r.wg.Done()

			// Start the runnable.
			if err := rn.Start(r.ctx); err != nil {
				r.errChan <- err
			}
		}(runnable)
	}
}

// Add should be able to be called before and after Start, but not after StopAndWait.
// Add should return an error when called during StopAndWait.
func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error {
	// Refuse new runnables once shutdown has begun.
	r.stop.RLock()
	if r.stopped {
		r.stop.RUnlock()
		return errRunnableGroupStopped
	}
	r.stop.RUnlock()

	if ready == nil {
		// A nil check means "ready immediately".
		ready = func(_ context.Context) bool { return true }
	}

	readyRunnable := &readyRunnable{
		Runnable: rn,
		Check:    ready,
	}

	// Handle start.
	// If the overall runnable group isn't started yet
	// we want to buffer the runnables and let Start()
	// queue them up again later.
	{
		r.start.Lock()

		// Check if we're already started.
		if !r.started {
			// Store the runnable in the internal if not.
			r.startQueue = append(r.startQueue, readyRunnable)
			r.start.Unlock()
			return nil
		}
		r.start.Unlock()
	}

	// Enqueue the runnable.
	r.ch <- readyRunnable
	return nil
}

// StopAndWait waits for all the runnables to finish before returning.
func (r *runnableGroup) StopAndWait(ctx context.Context) {
	r.stopOnce.Do(func() {
		// Close the reconciler channel once we're done.
		defer close(r.ch)

		// Ensure Start's once has fired so the reconciler goroutine exists.
		_ = r.Start(ctx)
		r.stop.Lock()
		// Store the stopped variable so we don't accept any new
		// runnables for the time being.
		r.stopped = true
		r.stop.Unlock()

		// Cancel the internal channel.
		r.cancel()

		done := make(chan struct{})
		go func() {
			defer close(done)
			// Wait for all the runnables to finish.
			r.wg.Wait()
		}()

		select {
		case <-done:
			// We're done, exit.
		case <-ctx.Done():
			// Calling context has expired, exit.
		}
	})
}
+package signals diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go new file mode 100644 index 000000000..a79cfb42d --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "context" + "os" + "os/signal" +) + +var onlyOneSignalHandler = make(chan struct{}) + +// SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned +// which is canceled on one of these signals. If a second signal is caught, the program +// is terminated with exit code 1. +func SetupSignalHandler() context.Context { + close(onlyOneSignalHandler) // panics when called twice + + ctx, cancel := context.WithCancel(context.Background()) + + c := make(chan os.Signal, 2) + signal.Notify(c, shutdownSignals...) + go func() { + <-c + cancel() + <-c + os.Exit(1) // second signal. Exit directly. 
+ }() + + return ctx +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go new file mode 100644 index 000000000..a0f00a732 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go @@ -0,0 +1,27 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os" + "syscall" +) + +var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go new file mode 100644 index 000000000..4907d573f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os" +) + +var shutdownSignals = []os.Signal{os.Interrupt} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go new file mode 100644 index 000000000..d32ce2534 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -0,0 +1,231 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "net/url" + "time" + + "github.com/prometheus/client_golang/prometheus" + reflectormetrics "k8s.io/client-go/tools/cache" + clientmetrics "k8s.io/client-go/tools/metrics" +) + +// this file contains setup logic to initialize the myriad of places +// that client-go registers metrics. We copy the names and formats +// from Kubernetes so that we match the core controllers. + +// Metrics subsystem and all of the keys used by the rest client. +const ( + RestClientSubsystem = "rest_client" + LatencyKey = "request_latency_seconds" + ResultKey = "requests_total" +) + +// Metrics subsystem and all keys used by the reflectors. 
const (
	ReflectorSubsystem     = "reflector"
	ListsTotalKey          = "lists_total"
	ListsDurationKey       = "list_duration_seconds"
	ItemsPerListKey        = "items_per_list"
	WatchesTotalKey        = "watches_total"
	ShortWatchesTotalKey   = "short_watches_total"
	WatchDurationKey       = "watch_duration_seconds"
	ItemsPerWatchKey       = "items_per_watch"
	LastResourceVersionKey = "last_resource_version"
)

var (
	// client metrics.

	// RequestLatency reports the request latency in seconds per verb/URL.
	// Deprecated: This metric is deprecated for removal in a future release: using the URL as a
	// dimension results in cardinality explosion for some consumers. It was deprecated upstream
	// in k8s v1.14 and hidden in v1.17 via https://github.com/kubernetes/kubernetes/pull/83836.
	// It is not registered by default. To register:
	//	import (
	//		clientmetrics "k8s.io/client-go/tools/metrics"
	//		clmetrics "sigs.k8s.io/controller-runtime/metrics"
	//	)
	//
	//	func init() {
	//		clmetrics.Registry.MustRegister(clmetrics.RequestLatency)
	//		clientmetrics.Register(clientmetrics.RegisterOpts{
	//			RequestLatency: clmetrics.LatencyAdapter
	//		})
	//	}
	RequestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Subsystem: RestClientSubsystem,
		Name:      LatencyKey,
		Help:      "Request latency in seconds. Broken down by verb and URL.",
		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 10),
	}, []string{"verb", "url"})

	requestResult = prometheus.NewCounterVec(prometheus.CounterOpts{
		Subsystem: RestClientSubsystem,
		Name:      ResultKey,
		Help:      "Number of HTTP requests, partitioned by status code, method, and host.",
	}, []string{"code", "method", "host"})

	// reflector metrics.

	// TODO(directxman12): update these to be histograms once the metrics overhaul KEP
	// PRs start landing.

	listsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Subsystem: ReflectorSubsystem,
		Name:      ListsTotalKey,
		Help:      "Total number of API lists done by the reflectors",
	}, []string{"name"})

	listsDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Subsystem: ReflectorSubsystem,
		Name:      ListsDurationKey,
		Help:      "How long an API list takes to return and decode for the reflectors",
	}, []string{"name"})

	itemsPerList = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Subsystem: ReflectorSubsystem,
		Name:      ItemsPerListKey,
		Help:      "How many items an API list returns to the reflectors",
	}, []string{"name"})

	watchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Subsystem: ReflectorSubsystem,
		Name:      WatchesTotalKey,
		Help:      "Total number of API watches done by the reflectors",
	}, []string{"name"})

	shortWatchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Subsystem: ReflectorSubsystem,
		Name:      ShortWatchesTotalKey,
		Help:      "Total number of short API watches done by the reflectors",
	}, []string{"name"})

	watchDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Subsystem: ReflectorSubsystem,
		Name:      WatchDurationKey,
		Help:      "How long an API watch takes to return and decode for the reflectors",
	}, []string{"name"})

	itemsPerWatch = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Subsystem: ReflectorSubsystem,
		Name:      ItemsPerWatchKey,
		Help:      "How many items an API watch returns to the reflectors",
	}, []string{"name"})

	lastResourceVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Subsystem: ReflectorSubsystem,
		Name:      LastResourceVersionKey,
		Help:      "Last resource version seen for the reflectors",
	}, []string{"name"})
)

// init wires both metric families into the controller-runtime Registry and
// into client-go's metric hooks at package load time.
func init() {
	registerClientMetrics()
	registerReflectorMetrics()
}

// registerClientMetrics sets up the client latency metrics from client-go.
func registerClientMetrics() {
	// register the metrics with our registry
	Registry.MustRegister(requestResult)

	// register the metrics with client-go
	clientmetrics.Register(clientmetrics.RegisterOpts{
		RequestResult: &resultAdapter{metric: requestResult},
	})
}

// registerReflectorMetrics sets up reflector (reconcile) loop metrics.
func registerReflectorMetrics() {
	Registry.MustRegister(listsTotal)
	Registry.MustRegister(listsDuration)
	Registry.MustRegister(itemsPerList)
	Registry.MustRegister(watchesTotal)
	Registry.MustRegister(shortWatchesTotal)
	Registry.MustRegister(watchDuration)
	Registry.MustRegister(itemsPerWatch)
	Registry.MustRegister(lastResourceVersion)

	reflectormetrics.SetReflectorMetricsProvider(reflectorMetricsProvider{})
}

// this section contains adapters, implementations, and other sundry organic, artisanally
// hand-crafted syntax trees required to convince client-go that it actually wants to let
// someone use its metrics.

// Client metrics adapters (method #1 for client-go metrics),
// copied (more-or-less directly) from k8s.io/kubernetes setup code
// (which isn't anywhere in an easily-importable place).

// LatencyAdapter implements LatencyMetric.
type LatencyAdapter struct {
	metric *prometheus.HistogramVec
}

// Observe increments the request latency metric for the given verb/URL.
func (l *LatencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) {
	l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds())
}

// resultAdapter implements client-go's ResultMetric on top of a CounterVec.
type resultAdapter struct {
	metric *prometheus.CounterVec
}

func (r *resultAdapter) Increment(_ context.Context, code, method, host string) {
	r.metric.WithLabelValues(code, method, host).Inc()
}

// Reflector metrics provider (method #2 for client-go metrics),
// copied (more-or-less directly) from k8s.io/kubernetes setup code
// (which isn't anywhere in an easily-importable place).

// reflectorMetricsProvider maps each reflector metric hook to the
// corresponding package-level metric vector, labeled by reflector name.
type reflectorMetricsProvider struct{}

func (reflectorMetricsProvider) NewListsMetric(name string) reflectormetrics.CounterMetric {
	return listsTotal.WithLabelValues(name)
}

func (reflectorMetricsProvider) NewListDurationMetric(name string) reflectormetrics.SummaryMetric {
	return listsDuration.WithLabelValues(name)
}

func (reflectorMetricsProvider) NewItemsInListMetric(name string) reflectormetrics.SummaryMetric {
	return itemsPerList.WithLabelValues(name)
}

func (reflectorMetricsProvider) NewWatchesMetric(name string) reflectormetrics.CounterMetric {
	return watchesTotal.WithLabelValues(name)
}

func (reflectorMetricsProvider) NewShortWatchesMetric(name string) reflectormetrics.CounterMetric {
	return shortWatchesTotal.WithLabelValues(name)
}

func (reflectorMetricsProvider) NewWatchDurationMetric(name string) reflectormetrics.SummaryMetric {
	return watchDuration.WithLabelValues(name)
}

func (reflectorMetricsProvider) NewItemsInWatchMetric(name string) reflectormetrics.SummaryMetric {
	return itemsPerWatch.WithLabelValues(name)
}

func (reflectorMetricsProvider) NewLastResourceVersionMetric(name string) reflectormetrics.GaugeMetric {
	return lastResourceVersion.WithLabelValues(name)
}
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package metrics contains controller related metrics utilities +*/ +package metrics diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go new file mode 100644 index 000000000..123d8c15f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "net" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("metrics") + +// DefaultBindAddress sets the default bind address for the metrics listener +// The metrics is on by default. +var DefaultBindAddress = ":8080" + +// NewListener creates a new TCP listener bound to the given address. 
+func NewListener(addr string) (net.Listener, error) { + if addr == "" { + // If the metrics bind address is empty, default to ":8080" + addr = DefaultBindAddress + } + + // Add a case to disable metrics altogether + if addr == "0" { + return nil, nil + } + + log.Info("Metrics server is starting to listen", "addr", addr) + ln, err := net.Listen("tcp", addr) + if err != nil { + er := fmt.Errorf("error listening on %s: %w", addr, err) + log.Error(er, "metrics server failed to listen. You may want to disable the metrics server or use another port if it is due to conflicts") + return nil, er + } + return ln, nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go new file mode 100644 index 000000000..ce17124d5 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go @@ -0,0 +1,30 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// RegistererGatherer combines both parts of the API of a Prometheus +// registry, both the Registerer and the Gatherer interfaces. +type RegistererGatherer interface { + prometheus.Registerer + prometheus.Gatherer +} + +// Registry is a prometheus registry for storing metrics within the +// controller-runtime. 
// Registry is a prometheus registry for storing metrics within the
// controller-runtime.
var Registry RegistererGatherer = prometheus.NewRegistry()

// This file is copied and adapted from k8s.io/kubernetes/pkg/util/workqueue/prometheus
// which registers metrics to the default prometheus Registry. We require very
// similar functionality, but must register metrics to a different Registry.

// Metrics subsystem and all keys used by the workqueue.
const (
	WorkQueueSubsystem         = "workqueue"
	DepthKey                   = "depth"
	AddsKey                    = "adds_total"
	QueueLatencyKey            = "queue_duration_seconds"
	WorkDurationKey            = "work_duration_seconds"
	UnfinishedWorkKey          = "unfinished_work_seconds"
	LongestRunningProcessorKey = "longest_running_processor_seconds"
	RetriesKey                 = "retries_total"
)

var (
	depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      DepthKey,
		Help:      "Current depth of workqueue",
	}, []string{"name"})

	adds = prometheus.NewCounterVec(prometheus.CounterOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      AddsKey,
		Help:      "Total number of adds handled by workqueue",
	}, []string{"name"})

	latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      QueueLatencyKey,
		Help:      "How long in seconds an item stays in workqueue before being requested",
		Buckets:   prometheus.ExponentialBuckets(10e-9, 10, 10),
	}, []string{"name"})

	workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      WorkDurationKey,
		Help:      "How long in seconds processing an item from workqueue takes.",
		Buckets:   prometheus.ExponentialBuckets(10e-9, 10, 10),
	}, []string{"name"})

	unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      UnfinishedWorkKey,
		Help: "How many seconds of work has been done that " +
			"is in progress and hasn't been observed by work_duration. Large " +
			"values indicate stuck threads. One can deduce the number of stuck " +
			"threads by observing the rate at which this increases.",
	}, []string{"name"})

	longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      LongestRunningProcessorKey,
		Help: "How many seconds has the longest running " +
			"processor for workqueue been running.",
	}, []string{"name"})

	retries = prometheus.NewCounterVec(prometheus.CounterOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      RetriesKey,
		Help:      "Total number of retries handled by workqueue",
	}, []string{"name"})
)

// init registers all workqueue metrics with the controller-runtime Registry
// and installs this package as client-go's workqueue metrics provider.
func init() {
	Registry.MustRegister(depth)
	Registry.MustRegister(adds)
	Registry.MustRegister(latency)
	Registry.MustRegister(workDuration)
	Registry.MustRegister(unfinished)
	Registry.MustRegister(longestRunningProcessor)
	Registry.MustRegister(retries)

	workqueue.SetProvider(workqueueMetricsProvider{})
}

// workqueueMetricsProvider maps each workqueue metric hook to the
// corresponding package-level metric vector, labeled by queue name.
type workqueueMetricsProvider struct{}

func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
	return depth.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
	return adds.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
	return latency.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
	return workDuration.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return unfinished.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return longestRunningProcessor.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
	return retries.WithLabelValues(name)
}
a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go new file mode 100644 index 000000000..e498107ef --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package predicate defines Predicates used by Controllers to filter Events before they are provided to EventHandlers. +*/ +package predicate diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go new file mode 100644 index 000000000..8608e22ac --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -0,0 +1,352 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package predicate + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") + +// Predicate filters events before enqueuing the keys. +type Predicate interface { + // Create returns true if the Create event should be processed + Create(event.CreateEvent) bool + + // Delete returns true if the Delete event should be processed + Delete(event.DeleteEvent) bool + + // Update returns true if the Update event should be processed + Update(event.UpdateEvent) bool + + // Generic returns true if the Generic event should be processed + Generic(event.GenericEvent) bool +} + +var _ Predicate = Funcs{} +var _ Predicate = ResourceVersionChangedPredicate{} +var _ Predicate = GenerationChangedPredicate{} +var _ Predicate = AnnotationChangedPredicate{} +var _ Predicate = or{} +var _ Predicate = and{} + +// Funcs is a function that implements Predicate. +type Funcs struct { + // Create returns true if the Create event should be processed + CreateFunc func(event.CreateEvent) bool + + // Delete returns true if the Delete event should be processed + DeleteFunc func(event.DeleteEvent) bool + + // Update returns true if the Update event should be processed + UpdateFunc func(event.UpdateEvent) bool + + // Generic returns true if the Generic event should be processed + GenericFunc func(event.GenericEvent) bool +} + +// Create implements Predicate. +func (p Funcs) Create(e event.CreateEvent) bool { + if p.CreateFunc != nil { + return p.CreateFunc(e) + } + return true +} + +// Delete implements Predicate. 
+func (p Funcs) Delete(e event.DeleteEvent) bool { + if p.DeleteFunc != nil { + return p.DeleteFunc(e) + } + return true +} + +// Update implements Predicate. +func (p Funcs) Update(e event.UpdateEvent) bool { + if p.UpdateFunc != nil { + return p.UpdateFunc(e) + } + return true +} + +// Generic implements Predicate. +func (p Funcs) Generic(e event.GenericEvent) bool { + if p.GenericFunc != nil { + return p.GenericFunc(e) + } + return true +} + +// NewPredicateFuncs returns a predicate funcs that applies the given filter function +// on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied +// to the new object. +func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { + return Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return filter(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return filter(e.ObjectNew) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return filter(e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return filter(e.Object) + }, + } +} + +// ResourceVersionChangedPredicate implements a default update predicate function on resource version change. +type ResourceVersionChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating resource version change. +func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object to update", "event", e) + return false + } + + return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() +} + +// GenerationChangedPredicate implements a default update predicate function on Generation change. +// +// This predicate will skip update events that have no change in the object's metadata.generation field. 
+// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object. +// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed. +// +// For CustomResource objects the Generation is only incremented when the status subresource is enabled. +// +// Caveats: +// +// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs. +// E.g For Deployment objects the Generation is also incremented on writes to the metadata.annotations field. +// For object types other than CustomResources be sure to verify which fields will trigger a Generation increment when they are written to. +// +// * With this predicate, any update events with writes only to the status field will not be reconciled. +// So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status. +type GenerationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating generation change. +func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() +} + +// AnnotationChangedPredicate implements a default update predicate function on annotation change. +// +// This predicate will skip update events that have no change in the object's annotation. 
+// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: +// +// Controller.Watch( +// &source.Kind{Type: v1.MyCustomKind}, +// &handler.EnqueueRequestForObject{}, +// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{})) +// +// This is mostly useful for controllers that needs to trigger both when the resource's generation is incremented +// (i.e., when the resource' .spec changes), or an annotation changes (e.g., for a staging/alpha API). +type AnnotationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating annotation change. +func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return !reflect.DeepEqual(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations()) +} + +// LabelChangedPredicate implements a default update predicate function on label change. +// +// This predicate will skip update events that have no change in the object's label. +// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: +// +// Controller.Watch( +// &source.Kind{Type: v1.MyCustomKind}, +// &handler.EnqueueRequestForObject{}, +// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})) +// +// This will be helpful when object's labels is carrying some extra specification information beyond object's spec, +// and the controller will be triggered if any valid spec change (not only in spec, but also in labels) happens. +type LabelChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for checking label change. 
+func (LabelChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels()) +} + +// And returns a composite predicate that implements a logical AND of the predicates passed to it. +func And(predicates ...Predicate) Predicate { + return and{predicates} +} + +type and struct { + predicates []Predicate +} + +func (a and) InjectFunc(f inject.Func) error { + for _, p := range a.predicates { + if err := f(p); err != nil { + return err + } + } + return nil +} + +func (a and) Create(e event.CreateEvent) bool { + for _, p := range a.predicates { + if !p.Create(e) { + return false + } + } + return true +} + +func (a and) Update(e event.UpdateEvent) bool { + for _, p := range a.predicates { + if !p.Update(e) { + return false + } + } + return true +} + +func (a and) Delete(e event.DeleteEvent) bool { + for _, p := range a.predicates { + if !p.Delete(e) { + return false + } + } + return true +} + +func (a and) Generic(e event.GenericEvent) bool { + for _, p := range a.predicates { + if !p.Generic(e) { + return false + } + } + return true +} + +// Or returns a composite predicate that implements a logical OR of the predicates passed to it. 
+func Or(predicates ...Predicate) Predicate { + return or{predicates} +} + +type or struct { + predicates []Predicate +} + +func (o or) InjectFunc(f inject.Func) error { + for _, p := range o.predicates { + if err := f(p); err != nil { + return err + } + } + return nil +} + +func (o or) Create(e event.CreateEvent) bool { + for _, p := range o.predicates { + if p.Create(e) { + return true + } + } + return false +} + +func (o or) Update(e event.UpdateEvent) bool { + for _, p := range o.predicates { + if p.Update(e) { + return true + } + } + return false +} + +func (o or) Delete(e event.DeleteEvent) bool { + for _, p := range o.predicates { + if p.Delete(e) { + return true + } + } + return false +} + +func (o or) Generic(e event.GenericEvent) bool { + for _, p := range o.predicates { + if p.Generic(e) { + return true + } + } + return false +} + +// LabelSelectorPredicate constructs a Predicate from a LabelSelector. +// Only objects matching the LabelSelector will be admitted. +func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) { + selector, err := metav1.LabelSelectorAsSelector(&s) + if err != nil { + return Funcs{}, err + } + return NewPredicateFuncs(func(o client.Object) bool { + return selector.Matches(labels.Set(o.GetLabels())) + }), nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go new file mode 100644 index 000000000..a01d603fe --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package ratelimiter defines rate limiters used by Controllers to limit how frequently requests may be queued. + +Typical rate limiters that can be used are implemented in client-go's workqueue package. +*/ +package ratelimiter diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go new file mode 100644 index 000000000..565a3a227 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go @@ -0,0 +1,30 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ratelimiter + +import "time" + +// RateLimiter is an identical interface of client-go workqueue RateLimiter. +type RateLimiter interface { + // When gets an item and gets to decide how long that item should wait + When(item interface{}) time.Duration + // Forget indicates that an item is finished being retried. 
Doesn't matter whether its for perm failing + // or for success, we'll stop tracking it + Forget(item interface{}) + // NumRequeues returns back how many failures the item has had + NumRequeues(item interface{}) int +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go new file mode 100644 index 000000000..d221dd7b3 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package reconcile defines the Reconciler interface to implement Kubernetes APIs. Reconciler is provided +to Controllers at creation time as the API implementation. +*/ +package reconcile diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go new file mode 100644 index 000000000..b044e6594 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/types" +) + +// Result contains the result of a Reconciler invocation. +type Result struct { + // Requeue tells the Controller to requeue the reconcile key. Defaults to false. + Requeue bool + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + RequeueAfter time.Duration +} + +// IsZero returns true if this result is empty. +func (r *Result) IsZero() bool { + if r == nil { + return true + } + return *r == Result{} +} + +// Request contains the information necessary to reconcile a Kubernetes object. This includes the +// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about +// any specific Event or the object contents itself. +type Request struct { + // NamespacedName is the name and namespace of the object to reconcile. + types.NamespacedName +} + +/* +Reconciler implements a Kubernetes API for a specific Resource by Creating, Updating or Deleting Kubernetes +objects, or by making changes to systems external to the cluster (e.g. cloudproviders, github, etc). + +reconcile implementations compare the state specified in an object by a user against the actual cluster state, +and then perform operations to make the actual cluster state reflect the state specified by the user. + +Typically, reconcile is triggered by a Controller in response to cluster Events (e.g. 
Creating, Updating, +Deleting Kubernetes objects) or external Events (GitHub Webhooks, polling external sources, etc). + +Example reconcile Logic: + + * Read an object and all the Pods it owns. + * Observe that the object spec specifies 5 replicas but actual cluster contains only 1 Pod replica. + * Create 4 Pods and set their OwnerReferences to the object. + +reconcile may be implemented as either a type: + + type reconcile struct {} + + func (reconcile) reconcile(controller.Request) (controller.Result, error) { + // Implement business logic of reading and writing objects here + return controller.Result{}, nil + } + +Or as a function: + + controller.Func(func(o controller.Request) (controller.Result, error) { + // Implement business logic of reading and writing objects here + return controller.Result{}, nil + }) + +Reconciliation is level-based, meaning action isn't driven off changes in individual Events, but instead is +driven by actual cluster state read from the apiserver or a local cache. +For example if responding to a Pod Delete Event, the Request won't contain that a Pod was deleted, +instead the reconcile function observes this when reading the cluster state and seeing the Pod as missing. +*/ +type Reconciler interface { + // Reconcile performs a full reconciliation for the object referred to by the Request. + // The Controller will requeue the Request to be processed again if an error is non-nil or + // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. + Reconcile(context.Context, Request) (Result, error) +} + +// Func is a function that implements the reconcile interface. +type Func func(context.Context, Request) (Result, error) + +var _ Reconciler = Func(nil) + +// Reconcile implements Reconciler. 
+func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go new file mode 100644 index 000000000..f093f0a72 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package recorder defines interfaces for working with Kubernetes event recorders. +// +// You can use these to emit Kubernetes events associated with a particular Kubernetes +// object. +package recorder + +import ( + "k8s.io/client-go/tools/record" +) + +// Provider knows how to generate new event recorders with given name. +type Provider interface { + // NewRecorder returns an EventRecorder with given name. + GetEventRecorderFor(name string) record.EventRecorder +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go new file mode 100644 index 000000000..17c60895f --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to +the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate +objects which implement the Injectable interfaces. +*/ +package inject diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go new file mode 100644 index 000000000..c8c56ba81 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go @@ -0,0 +1,164 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconciles. +// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10. 
+package inject + +import ( + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and +// Reconciles. +type Cache interface { + InjectCache(cache cache.Cache) error +} + +// CacheInto will set informers on i and return the result if it implements Cache. Returns +// false if i does not implement Cache. +func CacheInto(c cache.Cache, i interface{}) (bool, error) { + if s, ok := i.(Cache); ok { + return true, s.InjectCache(c) + } + return false, nil +} + +// APIReader is used by the Manager to inject the APIReader into necessary types. +type APIReader interface { + InjectAPIReader(client.Reader) error +} + +// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. +// Returns false if i does not implement APIReader. +func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { + if s, ok := i.(APIReader); ok { + return true, s.InjectAPIReader(reader) + } + return false, nil +} + +// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and +// Reconciles. +type Config interface { + InjectConfig(*rest.Config) error +} + +// ConfigInto will set config on i and return the result if it implements Config. Returns +// false if i does not implement Config. +func ConfigInto(config *rest.Config, i interface{}) (bool, error) { + if s, ok := i.(Config); ok { + return true, s.InjectConfig(config) + } + return false, nil +} + +// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and +// Reconciles. +type Client interface { + InjectClient(client.Client) error +} + +// ClientInto will set client on i and return the result if it implements Client. 
Returns +// false if i does not implement Client. +func ClientInto(client client.Client, i interface{}) (bool, error) { + if s, ok := i.(Client); ok { + return true, s.InjectClient(client) + } + return false, nil +} + +// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and +// Reconciles. +type Scheme interface { + InjectScheme(scheme *runtime.Scheme) error +} + +// SchemeInto will set scheme and return the result on i if it implements Scheme. Returns +// false if i does not implement Scheme. +func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) { + if is, ok := i.(Scheme); ok { + return true, is.InjectScheme(scheme) + } + return false, nil +} + +// Stoppable is used by the ControllerManager to inject stop channel into Sources, +// EventHandlers, Predicates, and Reconciles. +type Stoppable interface { + InjectStopChannel(<-chan struct{}) error +} + +// StopChannelInto will set stop channel on i and return the result if it implements Stoppable. +// Returns false if i does not implement Stoppable. +func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { + if s, ok := i.(Stoppable); ok { + return true, s.InjectStopChannel(stop) + } + return false, nil +} + +// Mapper is used to inject the rest mapper to components that may need it. +type Mapper interface { + InjectMapper(meta.RESTMapper) error +} + +// MapperInto will set the rest mapper on i and return the result if it implements Mapper. +// Returns false if i does not implement Mapper. +func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { + if m, ok := i.(Mapper); ok { + return true, m.InjectMapper(mapper) + } + return false, nil +} + +// Func injects dependencies into i. +type Func func(i interface{}) error + +// Injector is used by the ControllerManager to inject Func into Controllers. 
+type Injector interface { + InjectFunc(f Func) error +} + +// InjectorInto will set f and return the result on i if it implements Injector. Returns +// false if i does not implement Injector. +func InjectorInto(f Func, i interface{}) (bool, error) { + if ii, ok := i.(Injector); ok { + return true, ii.InjectFunc(f) + } + return false, nil +} + +// Logger is used to inject Loggers into components that need them +// and don't otherwise have opinions. +type Logger interface { + InjectLogger(l logr.Logger) error +} + +// LoggerInto will set the logger on the given object if it implements inject.Logger, +// returning true if a InjectLogger was called, and false otherwise. +func LoggerInto(l logr.Logger, i interface{}) (bool, error) { + if injectable, wantsLogger := i.(Logger); wantsLogger { + return true, injectable.InjectLogger(l) + } + return false, nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go new file mode 100644 index 000000000..9dc93a9b2 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package scheme contains utilities for gradually building Schemes, +// which contain information associating Go types with Kubernetes +// groups, versions, and kinds. 
+// +// Each API group should define a utility function +// called AddToScheme for adding its types to a Scheme: +// +// // in package myapigroupv1... +// var ( +// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"} +// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +// AddToScheme = SchemeBuilder.AddToScheme +// ) +// +// func init() { +// SchemeBuilder.Register(&MyType{}, &MyTypeList) +// } +// var ( +// scheme *runtime.Scheme = runtime.NewScheme() +// ) +// +// This also true of the built-in Kubernetes types. Then, in the entrypoint for +// your manager, assemble the scheme containing exactly the types you need, +// panicing if scheme registration failed. For instance, if our controller needs +// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1: +// +// func init() { +// utilruntime.Must(myapigroupv1.AddToScheme(scheme)) +// utilruntime.Must(kubernetesscheme.AddToScheme(scheme)) +// } +// +// func main() { +// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ +// Scheme: scheme, +// }) +// // ... +// } +// +package scheme + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. +type Builder struct { + GroupVersion schema.GroupVersion + runtime.SchemeBuilder +} + +// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. +func (bld *Builder) Register(object ...runtime.Object) *Builder { + bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(bld.GroupVersion, object...) + metav1.AddToGroupVersion(scheme, bld.GroupVersion) + return nil + }) + return bld +} + +// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld. 
+func (bld *Builder) RegisterAll(b *Builder) *Builder { + bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...) + return bld +} + +// AddToScheme adds all registered types to s. +func (bld *Builder) AddToScheme(s *runtime.Scheme) error { + return bld.SchemeBuilder.AddToScheme(s) +} + +// Build returns a new Scheme containing the registered types. +func (bld *Builder) Build() (*runtime.Scheme, error) { + s := runtime.NewScheme() + return s, bld.AddToScheme(s) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go new file mode 100644 index 000000000..31935c83c --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package source provides event streams to hook up to Controllers with Controller.Watch. Events are +used with handler.EventHandlers to enqueue reconcile.Requests and trigger Reconciles for Kubernetes +objects. 
+*/ +package source diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go new file mode 100644 index 000000000..f0cfe212e --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go @@ -0,0 +1,138 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") + +var _ cache.ResourceEventHandler = EventHandler{} + +// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. +type EventHandler struct { + EventHandler handler.EventHandler + Queue workqueue.RateLimitingInterface + Predicates []predicate.Predicate +} + +// OnAdd creates CreateEvent and calls Create on EventHandler. 
+func (e EventHandler) OnAdd(obj interface{}) { + c := event.CreateEvent{} + + // Pull Object out of the object + if o, ok := obj.(client.Object); ok { + c.Object = o + } else { + log.Error(nil, "OnAdd missing Object", + "object", obj, "type", fmt.Sprintf("%T", obj)) + return + } + + for _, p := range e.Predicates { + if !p.Create(c) { + return + } + } + + // Invoke create handler + e.EventHandler.Create(c, e.Queue) +} + +// OnUpdate creates UpdateEvent and calls Update on EventHandler. +func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { + u := event.UpdateEvent{} + + if o, ok := oldObj.(client.Object); ok { + u.ObjectOld = o + } else { + log.Error(nil, "OnUpdate missing ObjectOld", + "object", oldObj, "type", fmt.Sprintf("%T", oldObj)) + return + } + + // Pull Object out of the object + if o, ok := newObj.(client.Object); ok { + u.ObjectNew = o + } else { + log.Error(nil, "OnUpdate missing ObjectNew", + "object", newObj, "type", fmt.Sprintf("%T", newObj)) + return + } + + for _, p := range e.Predicates { + if !p.Update(u) { + return + } + } + + // Invoke update handler + e.EventHandler.Update(u, e.Queue) +} + +// OnDelete creates DeleteEvent and calls Delete on EventHandler. +func (e EventHandler) OnDelete(obj interface{}) { + d := event.DeleteEvent{} + + // Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a + // DeleteFinalStateUnknown struct, so the object needs to be pulled out. + // Copied from sample-controller + // This should never happen if we aren't missing events, which we have concluded that we are not + // and made decisions off of this belief. Maybe this shouldn't be here? + var ok bool + if _, ok = obj.(client.Object); !ok { + // If the object doesn't have Metadata, assume it is a tombstone object of type DeletedFinalStateUnknown + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + log.Error(nil, "Error decoding objects. 
Expected cache.DeletedFinalStateUnknown", + "type", fmt.Sprintf("%T", obj), + "object", obj) + return + } + + // Set obj to the tombstone obj + obj = tombstone.Obj + } + + // Pull Object out of the object + if o, ok := obj.(client.Object); ok { + d.Object = o + } else { + log.Error(nil, "OnDelete missing Object", + "object", obj, "type", fmt.Sprintf("%T", obj)) + return + } + + for _, p := range e.Predicates { + if !p.Delete(d) { + return + } + } + + // Invoke delete handler + e.EventHandler.Delete(d, e.Queue) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go new file mode 100644 index 000000000..241c582ef --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -0,0 +1,375 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package source + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source/internal" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var log = logf.RuntimeLog.WithName("source") + +const ( + // defaultBufferSize is the default number of event notifications that can be buffered. + defaultBufferSize = 1024 +) + +// Source is a source of events (eh.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc) +// which should be processed by event.EventHandlers to enqueue reconcile.Requests. +// +// * Use Kind for events originating in the cluster (e.g. Pod Create, Pod Update, Deployment Update). +// +// * Use Channel for events originating outside the cluster (eh.g. GitHub Webhook callback, Polling external urls). +// +// Users may build their own Source implementations. If their implementations implement any of the inject package +// interfaces, the dependencies will be injected by the Controller when Watch is called. +type Source interface { + // Start is internal and should be called only by the Controller to register an EventHandler with the Informer + // to enqueue reconcile.Requests. + Start(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error +} + +// SyncingSource is a source that needs syncing prior to being usable. The controller +// will call its WaitForSync prior to starting workers. 
+type SyncingSource interface { + Source + WaitForSync(ctx context.Context) error +} + +// NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used +// and not overwritten. It can be used to watch objects in a different cluster by passing the cache +// from that other cluster. +func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { + return &kindWithCache{kind: Kind{Type: object, cache: cache}} +} + +type kindWithCache struct { + kind Kind +} + +func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + return ks.kind.Start(ctx, handler, queue, prct...) +} + +func (ks *kindWithCache) WaitForSync(ctx context.Context) error { + return ks.kind.WaitForSync(ctx) +} + +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Kind struct { + // Type is the type of object to watch. e.g. &v1.Pod{} + Type client.Object + + // cache used to watch APIs + cache cache.Cache + + // started may contain an error if one was encountered during startup. If its closed and does not + // contain an error, startup and syncing finished. + started chan error + startCancel func() +} + +var _ SyncingSource = &Kind{} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + // Type should have been specified by the user. 
+ if ks.Type == nil { + return fmt.Errorf("must specify Kind.Type") + } + + // cache should have been injected before Start was called + if ks.cache == nil { + return fmt.Errorf("must call CacheInto on Kind before calling Start") + } + + // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not + // sync that informer (most commonly due to RBAC issues). + ctx, ks.startCancel = context.WithCancel(ctx) + ks.started = make(chan error) + go func() { + var ( + i cache.Informer + lastErr error + ) + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. + if err := wait.PollImmediateUntilWithContext(ctx, 10*time.Second, func(ctx context.Context) (bool, error) { + // Lookup the Informer from the Cache and add an EventHandler which populates the Queue + i, lastErr = ks.cache.GetInformer(ctx, ks.Type) + if lastErr != nil { + kindMatchErr := &meta.NoKindMatchError{} + switch { + case errors.As(lastErr, &kindMatchErr): + log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", + "kind", kindMatchErr.GroupKind) + case runtime.IsNotRegisteredError(lastErr): + log.Error(lastErr, "kind must be registered to the Scheme") + default: + log.Error(lastErr, "failed to get informer from cache") + } + return false, nil // Retry. 
+ } + return true, nil + }); err != nil { + if lastErr != nil { + ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) + return + } + ks.started <- err + return + } + + i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + if !ks.cache.WaitForCacheSync(ctx) { + // Would be great to return something more informative here + ks.started <- errors.New("cache did not sync") + } + close(ks.started) + }() + + return nil +} + +func (ks *Kind) String() string { + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind) WaitForSync(ctx context.Context) error { + select { + case err := <-ks.started: + return err + case <-ctx.Done(): + ks.startCancel() + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return errors.New("timed out waiting for cache to be synced") + } +} + +var _ inject.Cache = &Kind{} + +// InjectCache is internal should be called only by the Controller. InjectCache is used to inject +// the Cache dependency initialized by the ControllerManager. +func (ks *Kind) InjectCache(c cache.Cache) error { + if ks.cache == nil { + ks.cache = c + } + return nil +} + +var _ Source = &Channel{} + +// Channel is used to provide a source of events originating outside the cluster +// (e.g. GitHub Webhook callback). Channel requires the user to wire the external +// source (eh.g. http handler) to write GenericEvents to the underlying channel. 
+type Channel struct { + // once ensures the event distribution goroutine will be performed only once + once sync.Once + + // Source is the source channel to fetch GenericEvents + Source <-chan event.GenericEvent + + // stop is to end ongoing goroutine, and close the channels + stop <-chan struct{} + + // dest is the destination channels of the added event handlers + dest []chan event.GenericEvent + + // DestBufferSize is the specified buffer size of dest channels. + // Default to 1024 if not specified. + DestBufferSize int + + // destLock is to ensure the destination channels are safely added/removed + destLock sync.Mutex +} + +func (cs *Channel) String() string { + return fmt.Sprintf("channel source: %p", cs) +} + +var _ inject.Stoppable = &Channel{} + +// InjectStopChannel is internal should be called only by the Controller. +// It is used to inject the stop channel initialized by the ControllerManager. +func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error { + if cs.stop == nil { + cs.stop = stop + } + + return nil +} + +// Start implements Source and should only be called by the Controller. +func (cs *Channel) Start( + ctx context.Context, + handler handler.EventHandler, + queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + // Source should have been specified by the user. 
+ if cs.Source == nil { + return fmt.Errorf("must specify Channel.Source") + } + + // stop should have been injected before Start was called + if cs.stop == nil { + return fmt.Errorf("must call InjectStop on Channel before calling Start") + } + + // use default value if DestBufferSize not specified + if cs.DestBufferSize == 0 { + cs.DestBufferSize = defaultBufferSize + } + + dst := make(chan event.GenericEvent, cs.DestBufferSize) + + cs.destLock.Lock() + cs.dest = append(cs.dest, dst) + cs.destLock.Unlock() + + cs.once.Do(func() { + // Distribute GenericEvents to all EventHandler / Queue pairs Watching this source + go cs.syncLoop(ctx) + }) + + go func() { + for evt := range dst { + shouldHandle := true + for _, p := range prct { + if !p.Generic(evt) { + shouldHandle = false + break + } + } + + if shouldHandle { + handler.Generic(evt, queue) + } + } + }() + + return nil +} + +func (cs *Channel) doStop() { + cs.destLock.Lock() + defer cs.destLock.Unlock() + + for _, dst := range cs.dest { + close(dst) + } +} + +func (cs *Channel) distribute(evt event.GenericEvent) { + cs.destLock.Lock() + defer cs.destLock.Unlock() + + for _, dst := range cs.dest { + // We cannot make it under goroutine here, or we'll meet the + // race condition of writing message to closed channels. + // To avoid blocking, the dest channels are expected to be of + // proper buffer size. If we still see it blocked, then + // the controller is thought to be in an abnormal state. + dst <- evt + } +} + +func (cs *Channel) syncLoop(ctx context.Context) { + for { + select { + case <-ctx.Done(): + // Close destination channels + cs.doStop() + return + case evt, stillOpen := <-cs.Source: + if !stillOpen { + // if the source channel is closed, we're never gonna get + // anything more on it, so stop & bail + cs.doStop() + return + } + cs.distribute(evt) + } + } +} + +// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). 
+type Informer struct { + // Informer is the controller-runtime Informer + Informer cache.Informer +} + +var _ Source = &Informer{} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + // Informer should have been specified by the user. + if is.Informer == nil { + return fmt.Errorf("must specify Informer.Informer") + } + + is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + return nil +} + +func (is *Informer) String() string { + return fmt.Sprintf("informer source: %p", is.Informer) +} + +var _ Source = Func(nil) + +// Func is a function that implements Source. +type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error + +// Start implements Source. +func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface, + pr ...predicate.Predicate) error { + return f(ctx, evt, queue, pr...) +} + +func (f Func) String() string { + return fmt.Sprintf("func source: %p", f) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go new file mode 100644 index 000000000..c7cb71b75 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/json" +) + +// Decoder knows how to decode the contents of an admission +// request into a concrete object. +type Decoder struct { + codecs serializer.CodecFactory +} + +// NewDecoder creates a Decoder given the runtime.Scheme. +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +} + +// Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. +// If you want decode the OldObject in the AdmissionRequest, use DecodeRaw. +// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. +func (d *Decoder) Decode(req Request, into runtime.Object) error { + // we error out if rawObj is an empty object. + if len(req.Object.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + return d.DecodeRaw(req.Object, into) +} + +// DecodeRaw decodes a RawExtension object into the passed-in runtime.Object. +// It errors out if rawObj is empty i.e. containing 0 raw bytes. +func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error { + // NB(directxman12): there's a bug/weird interaction between decoders and + // the API server where the API server doesn't send a GVK on the embedded + // objects, which means the unstructured decoder refuses to decode. 
It + // also means we can't pass the unstructured directly in, since it'll try + // and call unstructured's special Unmarshal implementation, which calls + // back into that same decoder :-/ + // See kubernetes/kubernetes#74373. + + // we error out if rawObj is an empty object. + if len(rawObj.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + // unmarshal into unstructured's underlying object to avoid calling the decoder + return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object) + } + + deserializer := d.codecs.UniversalDeserializer() + return runtime.DecodeInto(deserializer, rawObj.Raw, into) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go new file mode 100644 index 000000000..0d9aa7a83 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "net/http" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Defaulter defines functions for setting defaults on resources. +type Defaulter interface { + runtime.Object + Default() +} + +// DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. 
+func DefaultingWebhookFor(defaulter Defaulter) *Webhook { + return &Webhook{ + Handler: &mutatingHandler{defaulter: defaulter}, + } +} + +type mutatingHandler struct { + defaulter Defaulter + decoder *Decoder +} + +var _ DecoderInjector = &mutatingHandler{} + +// InjectDecoder injects the decoder into a mutatingHandler. +func (h *mutatingHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { + if h.defaulter == nil { + panic("defaulter should never be nil") + } + + // Get the object in the request + obj := h.defaulter.DeepCopyObject().(Defaulter) + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + // Default the object + obj.Default() + marshalled, err := json.Marshal(obj) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + + // Create the patch + return PatchResponseFromRaw(req.Object.Raw, marshalled) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go new file mode 100644 index 000000000..d65727e62 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go @@ -0,0 +1,86 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package admission + +import ( + "context" + "encoding/json" + "errors" + "net/http" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// CustomDefaulter defines functions for setting defaults on resources. +type CustomDefaulter interface { + Default(ctx context.Context, obj runtime.Object) error +} + +// WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface. +func WithCustomDefaulter(obj runtime.Object, defaulter CustomDefaulter) *Webhook { + return &Webhook{ + Handler: &defaulterForType{object: obj, defaulter: defaulter}, + } +} + +type defaulterForType struct { + defaulter CustomDefaulter + object runtime.Object + decoder *Decoder +} + +var _ DecoderInjector = &defaulterForType{} + +func (h *defaulterForType) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *defaulterForType) Handle(ctx context.Context, req Request) Response { + if h.defaulter == nil { + panic("defaulter should never be nil") + } + if h.object == nil { + panic("object should never be nil") + } + + ctx = NewContextWithRequest(ctx, req) + + // Get the object in the request + obj := h.object.DeepCopyObject() + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + // Default the object + if err := h.defaulter.Default(ctx, obj); err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + + // Create the patch + marshalled, err := json.Marshal(obj) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + return PatchResponseFromRaw(req.Object.Raw, marshalled) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go new file mode 100644 index 
000000000..0b274dd02 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package admission provides implementation for admission webhook and methods to implement admission webhook handlers. + +See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. +*/ +package admission + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("admission") diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go new file mode 100644 index 000000000..066cc4225 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -0,0 +1,153 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + v1 "k8s.io/api/admission/v1" + "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var admissionScheme = runtime.NewScheme() +var admissionCodecs = serializer.NewCodecFactory(admissionScheme) + +func init() { + utilruntime.Must(v1.AddToScheme(admissionScheme)) + utilruntime.Must(v1beta1.AddToScheme(admissionScheme)) +} + +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var body []byte + var err error + ctx := r.Context() + if wh.WithContextFunc != nil { + ctx = wh.WithContextFunc(ctx, r) + } + + var reviewResponse Response + if r.Body == nil { + err = errors.New("request body is empty") + wh.log.Error(err, "bad request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + defer r.Body.Close() + if body, err = io.ReadAll(r.Body); err != nil { + wh.log.Error(err, "unable to read the body from the incoming request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + // verify the content type is accurate + if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { + err = fmt.Errorf("contentType=%s, expected application/json", contentType) + wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + // Both v1 and v1beta1 AdmissionReview types are exactly the same, so the v1beta1 type can + // be decoded into the v1 type. 
However the runtime codec's decoder guesses which type to + // decode into by type name if an Object's TypeMeta isn't set. By setting TypeMeta of an + // unregistered type to the v1 GVK, the decoder will coerce a v1beta1 AdmissionReview to v1. + // The actual AdmissionReview GVK will be used to write a typed response in case the + // webhook config permits multiple versions, otherwise this response will fail. + req := Request{} + ar := unversionedAdmissionReview{} + // avoid an extra copy + ar.Request = &req.AdmissionRequest + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) + if err != nil { + wh.log.Error(err, "unable to decode the request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) + + reviewResponse = wh.Handle(ctx, req) + wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) +} + +// writeResponse writes response to w generically, i.e. without encoding GVK information. +func (wh *Webhook) writeResponse(w io.Writer, response Response) { + wh.writeAdmissionResponse(w, v1.AdmissionReview{Response: &response.AdmissionResponse}) +} + +// writeResponseTyped writes response to w with GVK set to admRevGVK, which is necessary +// if multiple AdmissionReview versions are permitted by the webhook. +func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK *schema.GroupVersionKind) { + ar := v1.AdmissionReview{ + Response: &response.AdmissionResponse, + } + // Default to a v1 AdmissionReview, otherwise the API server may not recognize the request + // if multiple AdmissionReview versions are permitted by the webhook config. + // TODO(estroz): this should be configurable since older API servers won't know about v1. 
+ if admRevGVK == nil || *admRevGVK == (schema.GroupVersionKind{}) { + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + } else { + ar.SetGroupVersionKind(*admRevGVK) + } + wh.writeAdmissionResponse(w, ar) +} + +// writeAdmissionResponse writes ar to w. +func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { + if err := json.NewEncoder(w).Encode(ar); err != nil { + wh.log.Error(err, "unable to encode and write the response") + // Since the `ar v1.AdmissionReview` is a clear and legal object, + // it should not have problem to be marshalled into bytes. + // The error here is probably caused by the abnormal HTTP connection, + // e.g., broken pipe, so we can only write the error response once, + // to avoid endless circular calling. + serverError := Errored(http.StatusInternalServerError, err) + if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil { + wh.log.Error(err, "still unable to encode and write the InternalServerError response") + } + } else { + res := ar.Response + if log := wh.log; log.V(1).Enabled() { + if res.Result != nil { + log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason) + } + log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed) + } + } +} + +// unversionedAdmissionReview is used to decode both v1 and v1beta1 AdmissionReview types. +type unversionedAdmissionReview struct { + v1.AdmissionReview +} + +var _ runtime.Object = &unversionedAdmissionReview{} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go new file mode 100644 index 000000000..d5af0d598 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go @@ -0,0 +1,31 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. +type DecoderInjector interface { + InjectDecoder(*Decoder) error +} + +// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns +// false if i does not implement Decoder. +func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { + if s, ok := i.(DecoderInjector); ok { + return true, s.InjectDecoder(decoder) + } + return false, nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go new file mode 100644 index 000000000..26900cf2e --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -0,0 +1,147 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package admission + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +type multiMutating []Handler + +func (hs multiMutating) Handle(ctx context.Context, req Request) Response { + patches := []jsonpatch.JsonPatchOperation{} + for _, handler := range hs { + resp := handler.Handle(ctx, req) + if !resp.Allowed { + return resp + } + if resp.PatchType != nil && *resp.PatchType != admissionv1.PatchTypeJSONPatch { + return Errored(http.StatusInternalServerError, + fmt.Errorf("unexpected patch type returned by the handler: %v, only allow: %v", + resp.PatchType, admissionv1.PatchTypeJSONPatch)) + } + patches = append(patches, resp.Patches...) + } + var err error + marshaledPatch, err := json.Marshal(patches) + if err != nil { + return Errored(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %w", err)) + } + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + Patch: marshaledPatch, + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), + }, + } +} + +// InjectFunc injects the field setter into the handlers. +func (hs multiMutating) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + for _, handler := range hs { + if err := f(handler); err != nil { + return err + } + } + + return nil +} + +// InjectDecoder injects the decoder into the handlers. 
+func (hs multiMutating) InjectDecoder(d *Decoder) error { + for _, handler := range hs { + if _, err := InjectDecoderInto(d, handler); err != nil { + return err + } + } + return nil +} + +// MultiMutatingHandler combines multiple mutating webhook handlers into a single +// mutating webhook handler. Handlers are called in sequential order, and the first +// `allowed: false` response may short-circuit the rest. Users must take care to +// ensure patches are disjoint. +func MultiMutatingHandler(handlers ...Handler) Handler { + return multiMutating(handlers) +} + +type multiValidating []Handler + +func (hs multiValidating) Handle(ctx context.Context, req Request) Response { + for _, handler := range hs { + resp := handler.Handle(ctx, req) + if !resp.Allowed { + return resp + } + } + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }, + } +} + +// MultiValidatingHandler combines multiple validating webhook handlers into a single +// validating webhook handler. Handlers are called in sequential order, and the first +// `allowed: false` response may short-circuit the rest. +func MultiValidatingHandler(handlers ...Handler) Handler { + return multiValidating(handlers) +} + +// InjectFunc injects the field setter into the handlers. +func (hs multiValidating) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + for _, handler := range hs { + if err := f(handler); err != nil { + return err + } + } + + return nil +} + +// InjectDecoder injects the decoder into the handlers. 
+func (hs multiValidating) InjectDecoder(d *Decoder) error { + for _, handler := range hs { + if _, err := InjectDecoderInto(d, handler); err != nil { + return err + } + } + return nil +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go new file mode 100644 index 000000000..24ff1dee3 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "net/http" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Allowed constructs a response indicating that the given operation +// is allowed (without any patches). +func Allowed(reason string) Response { + return ValidationResponse(true, reason) +} + +// Denied constructs a response indicating that the given operation +// is not allowed. +func Denied(reason string) Response { + return ValidationResponse(false, reason) +} + +// Patched constructs a response indicating that the given operation is +// allowed, and that the target object should be modified by the given +// JSONPatch operations. 
+func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(reason) + resp.Patches = patches + + return resp +} + +// Errored creates a new Response for error-handling a request. +func Errored(code int32, err error) Response { + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: code, + Message: err.Error(), + }, + }, + } +} + +// ValidationResponse returns a response for admitting a request. +func ValidationResponse(allowed bool, reason string) Response { + code := http.StatusForbidden + if allowed { + code = http.StatusOK + } + resp := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: allowed, + Result: &metav1.Status{ + Code: int32(code), + }, + }, + } + if len(reason) > 0 { + resp.Result.Reason = metav1.StatusReason(reason) + } + return resp +} + +// PatchResponseFromRaw takes 2 byte arrays and returns a new response with json patch. +// The original object should be passed in as raw bytes to avoid the roundtripping problem +// described in https://github.com/kubernetes-sigs/kubebuilder/issues/510. +func PatchResponseFromRaw(original, current []byte) Response { + patches, err := jsonpatch.CreatePatch(original, current) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + return Response{ + Patches: patches, + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + PatchType: func() *admissionv1.PatchType { + if len(patches) == 0 { + return nil + } + pt := admissionv1.PatchTypeJSONPatch + return &pt + }(), + }, + } +} + +// validationResponseFromStatus returns a response for admitting a request with provided Status object. 
+func validationResponseFromStatus(allowed bool, status metav1.Status) Response { + resp := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: allowed, + Result: &status, + }, + } + return resp +} + +// WithWarnings adds the given warnings to the Response. +// If any warnings were already given, they will not be overwritten. +func (r Response) WithWarnings(warnings ...string) Response { + r.AdmissionResponse.Warnings = append(r.AdmissionResponse.Warnings, warnings...) + return r +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go new file mode 100644 index 000000000..4b27e75ed --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -0,0 +1,122 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + goerrors "errors" + "net/http" + + v1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// Validator defines functions for validating an operation. +type Validator interface { + runtime.Object + ValidateCreate() error + ValidateUpdate(old runtime.Object) error + ValidateDelete() error +} + +// ValidatingWebhookFor creates a new Webhook for validating the provided type. 
+func ValidatingWebhookFor(validator Validator) *Webhook { + return &Webhook{ + Handler: &validatingHandler{validator: validator}, + } +} + +type validatingHandler struct { + validator Validator + decoder *Decoder +} + +var _ DecoderInjector = &validatingHandler{} + +// InjectDecoder injects the decoder into a validatingHandler. +func (h *validatingHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { + if h.validator == nil { + panic("validator should never be nil") + } + + // Get the object in the request + obj := h.validator.DeepCopyObject().(Validator) + if req.Operation == v1.Create { + err := h.decoder.Decode(req, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateCreate() + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + if req.Operation == v1.Update { + oldObj := obj.DeepCopyObject() + + err := h.decoder.DecodeRaw(req.Object, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + err = h.decoder.DecodeRaw(req.OldObject, oldObj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateUpdate(oldObj) + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + if req.Operation == v1.Delete { + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + err := h.decoder.DecodeRaw(req.OldObject, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateDelete() + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, 
&apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + return Allowed("") +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go new file mode 100644 index 000000000..33252f113 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go @@ -0,0 +1,113 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "errors" + "fmt" + "net/http" + + v1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// CustomValidator defines functions for validating an operation. +type CustomValidator interface { + ValidateCreate(ctx context.Context, obj runtime.Object) error + ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error + ValidateDelete(ctx context.Context, obj runtime.Object) error +} + +// WithCustomValidator creates a new Webhook for validating the provided type. 
+func WithCustomValidator(obj runtime.Object, validator CustomValidator) *Webhook { + return &Webhook{ + Handler: &validatorForType{object: obj, validator: validator}, + } +} + +type validatorForType struct { + validator CustomValidator + object runtime.Object + decoder *Decoder +} + +var _ DecoderInjector = &validatorForType{} + +// InjectDecoder injects the decoder into a validatingHandler. +func (h *validatorForType) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *validatorForType) Handle(ctx context.Context, req Request) Response { + if h.validator == nil { + panic("validator should never be nil") + } + if h.object == nil { + panic("object should never be nil") + } + + ctx = NewContextWithRequest(ctx, req) + + // Get the object in the request + obj := h.object.DeepCopyObject() + + var err error + switch req.Operation { + case v1.Create: + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateCreate(ctx, obj) + case v1.Update: + oldObj := obj.DeepCopyObject() + if err := h.decoder.DecodeRaw(req.Object, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + if err := h.decoder.DecodeRaw(req.OldObject, oldObj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateUpdate(ctx, oldObj, obj) + case v1.Delete: + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + if err := h.decoder.DecodeRaw(req.OldObject, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateDelete(ctx, obj) + default: + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation request %q", req.Operation)) + } + + // Check the error message first. 
+ if err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + + // Return allowed if everything succeeded. + return Allowed("") +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go new file mode 100644 index 000000000..cfc46637c --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -0,0 +1,273 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "errors" + "net/http" + + "github.com/go-logr/logr" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/kubernetes/scheme" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" +) + +var ( + errUnableToEncodeResponse = errors.New("unable to encode response") +) + +// Request defines the input for an admission handler. 
+// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. Get, Create, etc), and the object itself. +type Request struct { + admissionv1.AdmissionRequest +} + +// Response is the output of an admission handler. +// It contains a response indicating if a given +// operation is allowed, as well as a set of patches +// to mutate the object in the case of a mutating admission handler. +type Response struct { + // Patches are the JSON patches for mutating webhooks. + // Using this instead of setting Response.Patch to minimize + // overhead of serialization and deserialization. + // Patches set here will override any patches in the response, + // so leave this empty if you want to set the patch response directly. + Patches []jsonpatch.JsonPatchOperation + // AdmissionResponse is the raw admission response. + // The Patch field in it will be overwritten by the listed patches. + admissionv1.AdmissionResponse +} + +// Complete populates any fields that are yet to be set in +// the underlying AdmissionResponse, It mutates the response. +func (r *Response) Complete(req Request) error { + r.UID = req.UID + + // ensure that we have a valid status code + if r.Result == nil { + r.Result = &metav1.Status{} + } + if r.Result.Code == 0 { + r.Result.Code = http.StatusOK + } + // TODO(directxman12): do we need to populate this further, and/or + // is code actually necessary (the same webhook doesn't use it) + + if len(r.Patches) == 0 { + return nil + } + + var err error + r.Patch, err = json.Marshal(r.Patches) + if err != nil { + return err + } + patchType := admissionv1.PatchTypeJSONPatch + r.PatchType = &patchType + + return nil +} + +// Handler can handle an AdmissionRequest. +type Handler interface { + // Handle yields a response to an AdmissionRequest. 
+ // + // The supplied context is extracted from the received http.Request, allowing wrapping + // http.Handlers to inject values into and control cancelation of downstream request processing. + Handle(context.Context, Request) Response +} + +// HandlerFunc implements Handler interface using a single function. +type HandlerFunc func(context.Context, Request) Response + +var _ Handler = HandlerFunc(nil) + +// Handle process the AdmissionRequest by invoking the underlying function. +func (f HandlerFunc) Handle(ctx context.Context, req Request) Response { + return f(ctx, req) +} + +// Webhook represents each individual webhook. +// +// It must be registered with a webhook.Server or +// populated by StandaloneWebhook to be ran on an arbitrary HTTP server. +type Webhook struct { + // Handler actually processes an admission request returning whether it was allowed or denied, + // and potentially patches to apply to the handler. + Handler Handler + + // WithContextFunc will allow you to take the http.Request.Context() and + // add any additional information such as passing the request path or + // headers thus allowing you to read them from within the handler + WithContextFunc func(context.Context, *http.Request) context.Context + + // decoder is constructed on receiving a scheme and passed down to then handler + decoder *Decoder + + log logr.Logger +} + +// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. +func (wh *Webhook) InjectLogger(l logr.Logger) error { + wh.log = l + return nil +} + +// Handle processes AdmissionRequest. +// If the webhook is mutating type, it delegates the AdmissionRequest to each handler and merge the patches. +// If the webhook is validating type, it delegates the AdmissionRequest to each handler and +// deny the request if anyone denies. 
+func (wh *Webhook) Handle(ctx context.Context, req Request) Response { + resp := wh.Handler.Handle(ctx, req) + if err := resp.Complete(req); err != nil { + wh.log.Error(err, "unable to encode response") + return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) + } + + return resp +} + +// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. +func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { + // TODO(directxman12): we should have a better way to pass this down + + var err error + wh.decoder, err = NewDecoder(s) + if err != nil { + return err + } + + // inject the decoder here too, just in case the order of calling this is not + // scheme first, then inject func + if wh.Handler != nil { + if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil { + return err + } + } + + return nil +} + +// GetDecoder returns a decoder to decode the objects embedded in admission requests. +// It may be nil if we haven't received a scheme to use to determine object types yet. +func (wh *Webhook) GetDecoder() *Decoder { + return wh.decoder +} + +// InjectFunc injects the field setter into the webhook. +func (wh *Webhook) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + + // also inject a decoder, and wrap this so that we get a setFields + // that injects a decoder (hopefully things don't ignore the duplicate + // InjectorInto call). 
+ + var setFields inject.Func + setFields = func(target interface{}) error { + if err := f(target); err != nil { + return err + } + + if _, err := inject.InjectorInto(setFields, target); err != nil { + return err + } + + if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil { + return err + } + + return nil + } + + return setFields(wh.Handler) +} + +// StandaloneOptions let you configure a StandaloneWebhook. +type StandaloneOptions struct { + // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. + Scheme *runtime.Scheme + // Logger to be used by the webhook. + // If none is set, it defaults to log.Log global logger. + Logger logr.Logger + // MetricsPath is used for labelling prometheus metrics + // by the path is served on. + // If none is set, prometheus metrics will not be generated. + MetricsPath string +} + +// StandaloneWebhook prepares a webhook for use without a webhook.Server, +// passing in the information normally populated by webhook.Server +// and instrumenting the webhook with metrics. +// +// Use this to attach your webhook to an arbitrary HTTP server or mux. +// +// Note that you are responsible for terminating TLS if you use StandaloneWebhook +// in your own server/mux. In order to be accessed by a kubernetes cluster, +// all webhook servers require TLS. 
+func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { + if opts.Scheme == nil { + opts.Scheme = scheme.Scheme + } + + if err := hook.InjectScheme(opts.Scheme); err != nil { + return nil, err + } + + if opts.Logger.GetSink() == nil { + opts.Logger = logf.RuntimeLog.WithName("webhook") + } + hook.log = opts.Logger + + if opts.MetricsPath == "" { + return hook, nil + } + return metrics.InstrumentedHook(opts.MetricsPath, hook), nil +} + +// requestContextKey is how we find the admission.Request in a context.Context. +type requestContextKey struct{} + +// RequestFromContext returns an admission.Request from ctx. +func RequestFromContext(ctx context.Context) (Request, error) { + if v, ok := ctx.Value(requestContextKey{}).(Request); ok { + return v, nil + } + + return Request{}, errors.New("admission.Request not found in context") +} + +// NewContextWithRequest returns a new Context, derived from ctx, which carries the +// provided admission.Request. +func NewContextWithRequest(ctx context.Context, req Request) context.Context { + return context.WithValue(ctx, requestContextKey{}, req) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go new file mode 100644 index 000000000..293137db4 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "gomodules.xyz/jsonpatch/v2" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// define some aliases for common bits of the webhook functionality + +// Defaulter defines functions for setting defaults on resources. +type Defaulter = admission.Defaulter + +// Validator defines functions for validating an operation. +type Validator = admission.Validator + +// CustomDefaulter defines functions for setting defaults on resources. +type CustomDefaulter = admission.CustomDefaulter + +// CustomValidator defines functions for validating an operation. +type CustomValidator = admission.CustomValidator + +// AdmissionRequest defines the input for an admission handler. +// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. Get, Create, etc), and the object itself. +type AdmissionRequest = admission.Request + +// AdmissionResponse is the output of an admission handler. +// It contains a response indicating if a given +// operation is allowed, as well as a set of patches +// to mutate the object in the case of a mutating admission handler. +type AdmissionResponse = admission.Response + +// Admission is webhook suitable for registration with the server +// an admission webhook that validates API operations and potentially +// mutates their contents. +type Admission = admission.Webhook + +// AdmissionHandler knows how to process admission requests, validating them, +// and potentially mutating the objects they contain. +type AdmissionHandler = admission.Handler + +// AdmissionDecoder knows how to decode objects from admission requests. +type AdmissionDecoder = admission.Decoder + +// JSONPatchOp represents a single JSONPatch patch operation. 
+type JSONPatchOp = jsonpatch.Operation + +var ( + // Allowed indicates that the admission request should be allowed for the given reason. + Allowed = admission.Allowed + + // Denied indicates that the admission request should be denied for the given reason. + Denied = admission.Denied + + // Patched indicates that the admission request should be allowed for the given reason, + // and that the contained object should be mutated using the given patches. + Patched = admission.Patched + + // Errored indicates that an error occurred in the admission request. + Errored = admission.Errored +) diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go new file mode 100644 index 000000000..a5b7a282c --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go @@ -0,0 +1,345 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides implementation for CRD conversion webhook that implements handler for version conversion requests for types that are convertible. + +See pkg/conversion for interface definitions required to ensure an API Type is convertible. 
+*/ +package conversion + +import ( + "encoding/json" + "fmt" + "net/http" + + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/conversion" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + log = logf.Log.WithName("conversion-webhook") +) + +// Webhook implements a CRD conversion webhook HTTP handler. +type Webhook struct { + scheme *runtime.Scheme + decoder *Decoder +} + +// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. +func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { + var err error + wh.scheme = s + wh.decoder, err = NewDecoder(s) + if err != nil { + return err + } + + return nil +} + +// ensure Webhook implements http.Handler +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + convertReview := &apix.ConversionReview{} + err := json.NewDecoder(r.Body).Decode(convertReview) + if err != nil { + log.Error(err, "failed to read conversion request") + w.WriteHeader(http.StatusBadRequest) + return + } + + // TODO(droot): may be move the conversion logic to a separate module to + // decouple it from the http layer ? + resp, err := wh.handleConvertRequest(convertReview.Request) + if err != nil { + log.Error(err, "failed to convert", "request", convertReview.Request.UID) + convertReview.Response = errored(err) + } else { + convertReview.Response = resp + } + convertReview.Response.UID = convertReview.Request.UID + convertReview.Request = nil + + err = json.NewEncoder(w).Encode(convertReview) + if err != nil { + log.Error(err, "failed to write response") + return + } +} + +// handles a version conversion request. 
+func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { + if req == nil { + return nil, fmt.Errorf("conversion request is nil") + } + var objects []runtime.RawExtension + + for _, obj := range req.Objects { + src, gvk, err := wh.decoder.Decode(obj.Raw) + if err != nil { + return nil, err + } + dst, err := wh.allocateDstObject(req.DesiredAPIVersion, gvk.Kind) + if err != nil { + return nil, err + } + err = wh.convertObject(src, dst) + if err != nil { + return nil, err + } + objects = append(objects, runtime.RawExtension{Object: dst}) + } + return &apix.ConversionResponse{ + UID: req.UID, + ConvertedObjects: objects, + Result: metav1.Status{ + Status: metav1.StatusSuccess, + }, + }, nil +} + +// convertObject will convert given a src object to dst object. +// Note(droot): couldn't find a way to reduce the cyclomatic complexity under 10 +// without compromising readability, so disabling gocyclo linter +func (wh *Webhook) convertObject(src, dst runtime.Object) error { + srcGVK := src.GetObjectKind().GroupVersionKind() + dstGVK := dst.GetObjectKind().GroupVersionKind() + + if srcGVK.GroupKind() != dstGVK.GroupKind() { + return fmt.Errorf("src %T and dst %T does not belong to same API Group", src, dst) + } + + if srcGVK == dstGVK { + return fmt.Errorf("conversion is not allowed between same type %T", src) + } + + srcIsHub, dstIsHub := isHub(src), isHub(dst) + srcIsConvertible, dstIsConvertible := isConvertible(src), isConvertible(dst) + + switch { + case srcIsHub && dstIsConvertible: + return dst.(conversion.Convertible).ConvertFrom(src.(conversion.Hub)) + case dstIsHub && srcIsConvertible: + return src.(conversion.Convertible).ConvertTo(dst.(conversion.Hub)) + case srcIsConvertible && dstIsConvertible: + return wh.convertViaHub(src.(conversion.Convertible), dst.(conversion.Convertible)) + default: + return fmt.Errorf("%T is not convertible to %T", src, dst) + } +} + +func (wh *Webhook) convertViaHub(src, dst 
conversion.Convertible) error { + hub, err := wh.getHub(src) + if err != nil { + return err + } + + if hub == nil { + return fmt.Errorf("%s does not have any Hub defined", src) + } + + err = src.ConvertTo(hub) + if err != nil { + return fmt.Errorf("%T failed to convert to hub version %T : %w", src, hub, err) + } + + err = dst.ConvertFrom(hub) + if err != nil { + return fmt.Errorf("%T failed to convert from hub version %T : %w", dst, hub, err) + } + + return nil +} + +// getHub returns an instance of the Hub for passed-in object's group/kind. +func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { + gvks, err := objectGVKs(wh.scheme, obj) + if err != nil { + return nil, err + } + if len(gvks) == 0 { + return nil, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + var hub conversion.Hub + var hubFoundAlready bool + for _, gvk := range gvks { + instance, err := wh.scheme.New(gvk) + if err != nil { + return nil, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) + } + if val, isHub := instance.(conversion.Hub); isHub { + if hubFoundAlready { + return nil, fmt.Errorf("multiple hub version defined for %T", obj) + } + hubFoundAlready = true + hub = val + } + } + return hub, nil +} + +// allocateDstObject returns an instance for a given GVK. +func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { + gvk := schema.FromAPIVersionAndKind(apiVersion, kind) + + obj, err := wh.scheme.New(gvk) + if err != nil { + return obj, err + } + + t, err := meta.TypeAccessor(obj) + if err != nil { + return obj, err + } + + t.SetAPIVersion(apiVersion) + t.SetKind(kind) + + return obj, nil +} + +// IsConvertible determines if given type is convertible or not. For a type +// to be convertible, the group-kind needs to have a Hub type defined and all +// non-hub types must be able to convert to/from Hub. 
+func IsConvertible(scheme *runtime.Scheme, obj runtime.Object) (bool, error) { + var hubs, spokes, nonSpokes []runtime.Object + + gvks, err := objectGVKs(scheme, obj) + if err != nil { + return false, err + } + if len(gvks) == 0 { + return false, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + for _, gvk := range gvks { + instance, err := scheme.New(gvk) + if err != nil { + return false, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) + } + + if isHub(instance) { + hubs = append(hubs, instance) + continue + } + + if !isConvertible(instance) { + nonSpokes = append(nonSpokes, instance) + continue + } + + spokes = append(spokes, instance) + } + + if len(gvks) == 1 { + return false, nil // single version + } + + if len(hubs) == 0 && len(spokes) == 0 { + // multiple version detected with no conversion implementation. This is + // true for multi-version built-in types. + return false, nil + } + + if len(hubs) == 1 && len(nonSpokes) == 0 { // convertible + return true, nil + } + + return false, PartialImplementationError{ + hubs: hubs, + nonSpokes: nonSpokes, + spokes: spokes, + } +} + +// objectGVKs returns all (Group,Version,Kind) for the Group/Kind of given object. +func objectGVKs(scheme *runtime.Scheme, obj runtime.Object) ([]schema.GroupVersionKind, error) { + // NB: we should not use `obj.GetObjectKind().GroupVersionKind()` to get the + // GVK here, since it is parsed from apiVersion and kind fields and it may + // return empty GVK if obj is an uninitialized object. 
+ objGVKs, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(objGVKs) != 1 { + return nil, fmt.Errorf("expect to get only one GVK for %v", obj) + } + objGVK := objGVKs[0] + knownTypes := scheme.AllKnownTypes() + + var gvks []schema.GroupVersionKind + for gvk := range knownTypes { + if objGVK.GroupKind() == gvk.GroupKind() { + gvks = append(gvks, gvk) + } + } + return gvks, nil +} + +// PartialImplementationError represents an error due to partial conversion +// implementation such as hub without spokes, multiple hubs or spokes without hub. +type PartialImplementationError struct { + gvk schema.GroupVersionKind + hubs []runtime.Object + nonSpokes []runtime.Object + spokes []runtime.Object +} + +func (e PartialImplementationError) Error() string { + if len(e.hubs) == 0 { + return fmt.Sprintf("no hub defined for gvk %s", e.gvk) + } + if len(e.hubs) > 1 { + return fmt.Sprintf("multiple(%d) hubs defined for group-kind '%s' ", + len(e.hubs), e.gvk.GroupKind()) + } + if len(e.nonSpokes) > 0 { + return fmt.Sprintf("%d inconvertible types detected for group-kind '%s'", + len(e.nonSpokes), e.gvk.GroupKind()) + } + return "" +} + +// isHub determines if passed-in object is a Hub or not. +func isHub(obj runtime.Object) bool { + _, yes := obj.(conversion.Hub) + return yes +} + +// isConvertible determines if passed-in object is a convertible. +func isConvertible(obj runtime.Object) bool { + _, yes := obj.(conversion.Convertible) + return yes +} + +// helper to construct error response. 
+func errored(err error) *apix.ConversionResponse { + return &apix.ConversionResponse{ + Result: metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + }, + } +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go new file mode 100644 index 000000000..6a9e9c236 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// Decoder knows how to decode the contents of a CRD version conversion +// request into a concrete object. +// TODO(droot): consider reusing decoder from admission pkg for this. +type Decoder struct { + codecs serializer.CodecFactory +} + +// NewDecoder creates a Decoder given the runtime.Scheme +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +} + +// Decode decodes the inlined object. 
+func (d *Decoder) Decode(content []byte) (runtime.Object, *schema.GroupVersionKind, error) { + deserializer := d.codecs.UniversalDeserializer() + return deserializer.Decode(content, nil, nil) +} + +// DecodeInto decodes the inlined object in the into the passed-in runtime.Object. +func (d *Decoder) DecodeInto(content []byte, into runtime.Object) error { + deserializer := d.codecs.UniversalDeserializer() + return runtime.DecodeInto(deserializer, content, into) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go new file mode 100644 index 000000000..2c93f0d99 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package webhook provides methods to build and bootstrap a webhook server. + +Currently, it only supports admission webhooks. It will support CRD conversion webhooks in the near future. 
+*/ +package webhook + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("webhook") diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go new file mode 100644 index 000000000..557004908 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // RequestLatency is a prometheus metric which is a histogram of the latency + // of processing admission requests. + RequestLatency = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "controller_runtime_webhook_latency_seconds", + Help: "Histogram of the latency of processing admission requests", + }, + []string{"webhook"}, + ) + + // RequestTotal is a prometheus metric which is a counter of the total processed admission requests. 
+ RequestTotal = func() *prometheus.CounterVec { + return prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "controller_runtime_webhook_requests_total", + Help: "Total number of admission requests by HTTP status code.", + }, + []string{"webhook", "code"}, + ) + }() + + // RequestInFlight is a prometheus metric which is a gauge of the in-flight admission requests. + RequestInFlight = func() *prometheus.GaugeVec { + return prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "controller_runtime_webhook_requests_in_flight", + Help: "Current number of admission requests being served.", + }, + []string{"webhook"}, + ) + }() +) + +func init() { + metrics.Registry.MustRegister(RequestLatency, RequestTotal, RequestInFlight) +} + +// InstrumentedHook adds some instrumentation on top of the given webhook. +func InstrumentedHook(path string, hookRaw http.Handler) http.Handler { + lbl := prometheus.Labels{"webhook": path} + + lat := RequestLatency.MustCurryWith(lbl) + cnt := RequestTotal.MustCurryWith(lbl) + gge := RequestInFlight.With(lbl) + + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + + return promhttp.InstrumentHandlerDuration( + lat, + promhttp.InstrumentHandlerCounter( + cnt, + promhttp.InstrumentHandlerInFlight(gge, hookRaw), + ), + ) +} diff --git a/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go new file mode 100644 index 000000000..fa56be5d5 --- /dev/null +++ b/controlplane/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -0,0 +1,345 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + kscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" +) + +// DefaultPort is the default port that the webhook server serves. +var DefaultPort = 9443 + +// Server is an admission webhook server that can serve traffic and +// generates related k8s resources for deploying. +// +// TLS is required for a webhook to be accessed by kubernetes, so +// you must provide a CertName and KeyName or have valid cert/key +// at the default locations (tls.crt and tls.key). If you do not +// want to configure TLS (i.e for testing purposes) run an +// admission.StandaloneWebhook in your own server. +type Server struct { + // Host is the address that the server will listen on. + // Defaults to "" - all addresses. + Host string + + // Port is the port number that the server will serve. + // It will be defaulted to 9443 if unspecified. + Port int + + // CertDir is the directory that contains the server key and certificate. The + // server key and certificate. + CertDir string + + // CertName is the server certificate name. Defaults to tls.crt. + CertName string + + // KeyName is the server key name. Defaults to tls.key. 
+ KeyName string + + // ClientCAName is the CA certificate name which server used to verify remote(client)'s certificate. + // Defaults to "", which means server does not verify client's certificate. + ClientCAName string + + // TLSVersion is the minimum version of TLS supported. Accepts + // "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility) + TLSMinVersion string + + // TLSOpts is used to allow configuring the TLS config used for the server + TLSOpts []func(*tls.Config) + + // WebhookMux is the multiplexer that handles different webhooks. + WebhookMux *http.ServeMux + + // webhooks keep track of all registered webhooks for dependency injection, + // and to provide better panic messages on duplicate webhook registration. + webhooks map[string]http.Handler + + // setFields allows injecting dependencies from an external source + setFields inject.Func + + // defaultingOnce ensures that the default fields are only ever set once. + defaultingOnce sync.Once + + // started is set to true immediately before the server is started + // and thus can be used to check if the server has been started + started bool + + // mu protects access to the webhook map & setFields for Start, Register, etc + mu sync.Mutex +} + +// setDefaults does defaulting for the Server. +func (s *Server) setDefaults() { + s.webhooks = map[string]http.Handler{} + if s.WebhookMux == nil { + s.WebhookMux = http.NewServeMux() + } + + if s.Port <= 0 { + s.Port = DefaultPort + } + + if len(s.CertDir) == 0 { + s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") + } + + if len(s.CertName) == 0 { + s.CertName = "tls.crt" + } + + if len(s.KeyName) == 0 { + s.KeyName = "tls.key" + } +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates +// the webhook server doesn't need leader election. 
+func (*Server) NeedLeaderElection() bool { + return false +} + +// Register marks the given webhook as being served at the given path. +// It panics if two hooks are registered on the same path. +func (s *Server) Register(path string, hook http.Handler) { + s.mu.Lock() + defer s.mu.Unlock() + + s.defaultingOnce.Do(s.setDefaults) + if _, found := s.webhooks[path]; found { + panic(fmt.Errorf("can't register duplicate path: %v", path)) + } + // TODO(directxman12): call setfields if we've already started the server + s.webhooks[path] = hook + s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) + + regLog := log.WithValues("path", path) + regLog.Info("Registering webhook") + + // we've already been "started", inject dependencies here. + // Otherwise, InjectFunc will do this for us later. + if s.setFields != nil { + if err := s.setFields(hook); err != nil { + // TODO(directxman12): swallowing this error isn't great, but we'd have to + // change the signature to fix that + regLog.Error(err, "unable to inject fields into webhook during registration") + } + + baseHookLog := log.WithName("webhooks") + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. + if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil { + regLog.Error(err, "unable to logger into webhook during registration") + } + } +} + +// StartStandalone runs a webhook server without +// a controller manager. 
+func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error { + // Use the Kubernetes client-go scheme if none is specified + if scheme == nil { + scheme = kscheme.Scheme + } + + if err := s.InjectFunc(func(i interface{}) error { + if _, err := inject.SchemeInto(scheme, i); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + return s.Start(ctx) +} + +// tlsVersion converts from human-readable TLS version (for example "1.1") +// to the values accepted by tls.Config (for example 0x301). +func tlsVersion(version string) (uint16, error) { + switch version { + // default is previous behaviour + case "": + return tls.VersionTLS10, nil + case "1.0": + return tls.VersionTLS10, nil + case "1.1": + return tls.VersionTLS11, nil + case "1.2": + return tls.VersionTLS12, nil + case "1.3": + return tls.VersionTLS13, nil + default: + return 0, fmt.Errorf("invalid TLSMinVersion %v: expects 1.0, 1.1, 1.2, 1.3 or empty", version) + } +} + +// Start runs the server. +// It will install the webhook related resources depend on the server configuration. 
+func (s *Server) Start(ctx context.Context) error { + s.defaultingOnce.Do(s.setDefaults) + + baseHookLog := log.WithName("webhooks") + baseHookLog.Info("Starting webhook server") + + certPath := filepath.Join(s.CertDir, s.CertName) + keyPath := filepath.Join(s.CertDir, s.KeyName) + + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return err + } + + go func() { + if err := certWatcher.Start(ctx); err != nil { + log.Error(err, "certificate watcher error") + } + }() + + tlsMinVersion, err := tlsVersion(s.TLSMinVersion) + if err != nil { + return err + } + + cfg := &tls.Config{ //nolint:gosec + NextProtos: []string{"h2"}, + GetCertificate: certWatcher.GetCertificate, + MinVersion: tlsMinVersion, + } + + // load CA to verify client certificate + if s.ClientCAName != "" { + certPool := x509.NewCertPool() + clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName)) + if err != nil { + return fmt.Errorf("failed to read client CA cert: %w", err) + } + + ok := certPool.AppendCertsFromPEM(clientCABytes) + if !ok { + return fmt.Errorf("failed to append client CA cert to CA pool") + } + + cfg.ClientCAs = certPool + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + // fallback TLS config ready, will now mutate if passer wants full control over it + for _, op := range s.TLSOpts { + op(cfg) + } + + listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg) + if err != nil { + return err + } + + log.Info("Serving webhook server", "host", s.Host, "port", s.Port) + + srv := httpserver.New(s.WebhookMux) + + idleConnsClosed := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("shutting down webhook server") + + // TODO: use a context with reasonable timeout + if err := srv.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout + log.Error(err, "error shutting down the HTTP server") + } + close(idleConnsClosed) + }() + + s.mu.Lock() + s.started = true + 
s.mu.Unlock() + if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { + return err + } + + <-idleConnsClosed + return nil +} + +// StartedChecker returns an healthz.Checker which is healthy after the +// server has been started. +func (s *Server) StartedChecker() healthz.Checker { + config := &tls.Config{ + InsecureSkipVerify: true, // nolint:gosec // config is used to connect to our own webhook port. + } + return func(req *http.Request) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.started { + return fmt.Errorf("webhook server has not been started yet") + } + + d := &net.Dialer{Timeout: 10 * time.Second} + conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config) + if err != nil { + return fmt.Errorf("webhook server is not reachable: %w", err) + } + + if err := conn.Close(); err != nil { + return fmt.Errorf("webhook server is not reachable: closing connection: %w", err) + } + + return nil + } +} + +// InjectFunc injects the field setter into the server. +func (s *Server) InjectFunc(f inject.Func) error { + s.setFields = f + + // inject fields here that weren't injected in Register because we didn't have setFields yet. + baseHookLog := log.WithName("webhooks") + for hookPath, webhook := range s.webhooks { + if err := s.setFields(webhook); err != nil { + return err + } + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. + if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { + return err + } + } + return nil +}