diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index e9e78fc2676..5198e221d3c 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -5,7 +5,7 @@ jobs: name: Validate runs-on: ubuntu-latest # build-tools is built from ../../tools/build-tools.Dockerfile - container: kedacore/build-tools:latest + container: kedacore/build-tools:v2 steps: - name: Check out code uses: actions/checkout@v1 @@ -20,6 +20,9 @@ jobs: restore-keys: | ${{ runner.os }}-go- + - name: Verify Generated clientset is up to date + run: make verify-clientset + - name: Test run: make test diff --git a/.github/workflows/v2-build.yml b/.github/workflows/v2-build.yml new file mode 100644 index 00000000000..ca0a32d380e --- /dev/null +++ b/.github/workflows/v2-build.yml @@ -0,0 +1,46 @@ +name: master build +on: + push: + branches: + - v2 +jobs: + validate: + name: Validate + runs-on: ubuntu-latest + # build-tools is built from ../../tools/build-tools.Dockerfile + container: kedacore/build-tools:v2 + steps: + - name: Check out code + uses: actions/checkout@v1 + with: + fetch-depth: 1 + + - name: Go modules cache + uses: actions/cache@v1 + with: + path: /go/pkg + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Test + run: make test + + - name: Login to Docker Hub + env: + DOCKER_HUB_ACCESS_TOKEN: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + DOCKER_HUB_USERNAME: ${{ secrets.DOCKER_HUB_USERNAME }} + run: echo $DOCKER_HUB_ACCESS_TOKEN | docker login -u $DOCKER_HUB_USERNAME --password-stdin + + - name: Publish + run: make publish + + - name: Run end to end tests + env: + AZURE_SUBSCRIPTION: ${{ secrets.AZURE_SUBSCRIPTION }} + AZURE_RESOURCE_GROUP: ${{ secrets.AZURE_RESOURCE_GROUP }} + AZURE_SP_ID: ${{ secrets.AZURE_SP_ID }} + AZURE_SP_KEY: ${{ secrets.AZURE_SP_KEY }} + AZURE_SP_TENANT: ${{ secrets.AZURE_SP_TENANT }} + TEST_STORAGE_CONNECTION_STRING: ${{ 
secrets.TEST_STORAGE_CONNECTION_STRING }} + run: make e2e-test diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e1ad129777..3f4d59e296c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,11 +2,9 @@ ## Deprecations -- As of v1.3, support for `brokerList` is deprecated for our Kafka topic scaler and will be removed in v2.0 ([#632](https://github.com/kedacore/keda/issues/632)) - ## History -- [v1.5.0](#v150) +- [v2.0.0](#v200) - [v1.4.1](#v141) - [v1.4.0](#v140) - [v1.3.0](#v130) @@ -14,37 +12,33 @@ - [v1.1.0](#v110) - [v1.0.0](#v100) -## v1.5.0 - -Learn more about our release in [our milestone](https://github.com/kedacore/keda/milestone/12). +## v2.0.0 ### New -- **Scalers** - - Introduce Active MQ Artemis scaler ([Docs](https://keda.sh/docs/1.5/scalers/artemis/)) - - Introduce Redis Streams scaler ([Docs](https://keda.sh/docs/1.5/scalers/redis-streams/) | [Details](https://github.com/kedacore/keda/issues/746)) - - Introduce Cron scaler ([Docs](https://keda.sh/docs/1.5/scalers/cron/) | [Details](https://github.com/kedacore/keda/issues/812)) -- **Secret Providers** - - Introduce HashiCorp Vault secret provider ([Docs](https://keda.sh/docs/1.5/concepts/authentication/#hashicorp-vault-secrets) | [Details](https://github.com/kedacore/keda/issues/673)) -- **Other** - - Introduction of `nodeSelector` in raw YAML deployment specifications ([Details](https://github.com/kedacore/keda/pull/856)) +- KEDA scales any CustomResource that implements Scale subresource ([#703](https://github.com/kedacore/keda/issues/703)) +- Provide KEDA go-client ([#494](https://github.com/kedacore/keda/issues/494)) +- Define KEDA readiness and liveness probes ([#788](https://github.com/kedacore/keda/issues/788)) +- KEDA Support for configurable scaling behavior in HPA v2beta2 ([#802](https://github.com/kedacore/keda/issues/802)) +- Adding Standard Resource metrics to KEDA ([#874](https://github.com/kedacore/keda/pull/874)) ### Improvements -- Improved message count determination when using 
`includeUnacked` in RabbitMQ scaler ([Details](https://github.com/kedacore/keda/pull/781)) -- Fix for blank path without trailing slash in RabbitMQ scaler ([Details](https://github.com/kedacore/keda/issues/790)) -- Improved parsing of connection strings to support `BlobEndpoint`, `QueueEndpoint`, `TableEndpoint` & `FileEndpoint` segments ([Details](https://github.com/kedacore/keda/issues/821)) -- Support scaling when no storage checkpoint exists in Azure Event Hubs scaler ([Details](https://github.com/kedacore/keda/issues/797)) -- GCP Pub Scaler should not panic on invalid credentials ([Details](https://github.com/kedacore/keda/issues/616)) -- Make `queueLength` optional in RabbitMQ scaler ([Details](https://github.com/kedacore/keda/issues/880)) +- HPA: move from autoscaling v2beta1 to v2beta2 ([#721](https://github.com/kedacore/keda/issues/721)) +- Introduce shortnames for CRDs ([#774](https://github.com/kedacore/keda/issues/774)) +- kubectl get scaledobject should show related trigger authentication ([#777](https://github.com/kedacore/keda/issues/777)) +- kubectl get triggerauthentication should show information about configured parameters ([#778](https://github.com/kedacore/keda/issues/778)) ### Breaking Changes -None. +- Change apiGroup from keda.k8s.io to keda.sh ([#552](https://github.com/kedacore/keda/issues/552)) +- Introduce a separate ScaledObject and ScaledJob([#653](https://github.com/kedacore/keda/issues/653)) +- Remove `New()` and `Close()` from the interface of `service ExternalScaler` in `externalscaler.proto`. +- Removed deprecated brokerList for Kafka scaler ([#882](https://github.com/kedacore/keda/pull/882)) ### Other - -None. +- Update Operator SDK and k8s deps ([#870](https://github.com/kedacore/keda/issues/870)) +- Added ScaledObject Status Conditions to display status of scaling ([#750](https://github.com/kedacore/keda/pull/750)) ## v1.4.1 @@ -107,7 +101,6 @@ None. 
### Improvements - Make targetQueryValue configurable in postgreSQL scaler ([#643](https://github.com/kedacore/keda/pull/643)) -- Added bootstrapServers to deprecate brokerList ([#621](https://github.com/kedacore/keda/pull/621)) - Removed the need for deploymentName label ([#644](https://github.com/kedacore/keda/pull/644)) - Adding Kubernetes recommended labels to resources ([#596](https://github.com/kedacore/keda/pull/596)) diff --git a/CREATE-NEW-SCALER.md b/CREATE-NEW-SCALER.md index d15b4c77c77..4c094b4b9c8 100644 --- a/CREATE-NEW-SCALER.md +++ b/CREATE-NEW-SCALER.md @@ -8,14 +8,14 @@ In order to develop a scaler, a developer should do the following: 2. Create the new scaler struct under the `pkg/scalers` folder. 3. Implement the methods defined in the [scaler interface](#scaler-interface) section. 4. Create a constructor according to [this](#constructor). -5. Change the `getScaler` function in `pkg/handler/scale_handler.go` by adding another switch case that matches your scaler. +5. Change the `getScaler` function in `pkg/scaling/scale_handler.go` by adding another switch case that matches your scaler. 6. Run `make build` from the root of KEDA and your scaler is ready. -If you want to deploy locally +If you want to deploy locally 1. Run `export VERSION=local` 2. Open the terminal and go to the root of the source code 3. Run `make build` -5. If you haven't done it yet clone the charts repository: `git clone git@github.com:kedacore/charts.git` +5. If you haven't done it yet clone the charts repository: `git clone git@github.com:kedacore/charts.git` 6. In the terminal, navigate to the `chart/keda` folder (the charts downloaded in step 3), and run the following command (don't forget to replace the placeholder text in the command) `helm install . --set image.keda=kedacore/keda:[tag used in step 1],image.pullPolicy=IfNotPresent`. The last step assumes that you have `helm` already installed in the cluster. 
In this step we install the helm chart, and we substitute the image with the image we built in step 1. Notice that we are also overriding the image PullPolice to `IfNotPresent` since this is a local cluster. @@ -29,12 +29,12 @@ The scalers in KEDA are implementations of a KEDA `Scaler` Go interface declared This is the key function of a scaler; it returns a value that represents a current state of an external metric (e.g. length of a queue). The return type is an `ExternalMetricValue` struct which has the following fields: - `MetricName`: this is the name of the metric that we are returning. - `Timestamp`: indicates the time at which the metrics were produced. -- `WindowSeconds`: //TODO +- `WindowSeconds`: //TODO - `Value`: A numerical value that represents the state of the metric. It could be the length of a queue, or it can be the amount of lag in a stream, but it can also be a simple representation of the state. -Kubernetes HPA (Horizontal Pod Autoscaler) will poll `GetMetrics` reulgarly through KEDA's metric server (as long as there is at least one pod), and compare the returned value to a configured value in the ScaledObject configuration. Kubernetes will use the following formula to decide whether to scale the pods up and down: +Kubernetes HPA (Horizontal Pod Autoscaler) will poll `GetMetrics` reulgarly through KEDA's metric server (as long as there is at least one pod), and compare the returned value to a configured value in the ScaledObject configuration. Kubernetes will use the following formula to decide whether to scale the pods up and down: -`desiredReplicas = ceil[currentReplicas * ( currentMetricValue / desiredMetricValue )]`. +`desiredReplicas = ceil[currentReplicas * ( currentMetricValue / desiredMetricValue )]`. For more details check [Kubernetes HPA documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). 
@@ -50,7 +50,7 @@ The return type of this function is `MetricSpec`, but in KEDA's case we will mos ### IsActive -For some reason, the scaler might need to declare itself as in-active, and the way it can do this is through implementing the function `IsActive`. +For some reason, the scaler might need to declare itself as in-active, and the way it can do this is through implementing the function `IsActive`. KEDA polls ScaledObject object according to the `pollingInterval` confiugred in the ScaledObject; it checks the last time it was polled, it checks if the number of replicas is greater than 0, and if the scaler itself is active. So if the scaler returns false for `IsActive`, and if current number of replicas is greater than 0, and there is no configured minimum pods, then KEDA scales down to 0. @@ -58,7 +58,7 @@ KEDA polls ScaledObject object according to the `pollingInterval` confiugred in After each poll on the scaler to retrieve the metrics, KEDA calls this function for each scaler to give the scaler the opportunity to close any resources, like http clients for example. ### Constructor -What is missing from the `scaler` interface is a function that constructs the scaler itself. Up until the moment of writing this document, KEDA does not have a dynamic way to load scalers (at least not officially)[***]; instead scalers are part of KEDA's code-base, and they are shipped with KEDA's binary. +What is missing from the `scaler` interface is a function that constructs the scaler itself. Up until the moment of writing this document, KEDA does not have a dynamic way to load scalers (at least not officially)[***]; instead scalers are part of KEDA's code-base, and they are shipped with KEDA's binary. 
Thus, each scaler should have a constructing function, KEDA will [explicitly invoke](https://github.com/kedacore/keda/blob/4d0cf5ef09ef348cf3a158634910f00741ae5258/pkg/handler/scale_handler.go#L565) the construction function based on the `trigger` property configured in the ScaledObject. diff --git a/Makefile b/Makefile index 6ad9987b5d7..2744b121dfb 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ ################################################## # Variables # ################################################## -VERSION ?= master +VERSION ?= 2.0.0-alpha1 IMAGE_REGISTRY ?= docker.io IMAGE_REPO ?= kedacore @@ -88,8 +88,12 @@ ifndef GOROOT @echo "WARNING: GOROOT is not defined" endif +.PHONY: gofmt +gofmt: + go fmt ./... + .PHONY: build -build: checkenv build-adapter build-controller +build: gofmt checkenv build-adapter build-controller .PHONY: build-controller build-controller: generate-api pkg/scalers/liiklus/LiiklusService.pb.go @@ -107,10 +111,28 @@ build-adapter: generate-api pkg/scalers/liiklus/LiiklusService.pb.go .PHONY: generate-api generate-api: $(GO_BUILD_VARS) operator-sdk generate k8s - $(GO_BUILD_VARS) operator-sdk generate openapi + $(GO_BUILD_VARS) operator-sdk generate crds --crd-version=v1beta1 + # withTriggers and withPods are only used for duck typing so we only need the deepcopy methods + # However operator-sdk generate doesn't appear to have an option for that + # until this issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/398 + rm deploy/crds/keda.sh_withtriggers_crd.yaml + rm deploy/crds/keda.sh_withpods_crd.yaml pkg/scalers/liiklus/LiiklusService.pb.go: hack/LiiklusService.proto protoc -I hack/ hack/LiiklusService.proto --go_out=plugins=grpc:pkg/scalers/liiklus pkg/scalers/liiklus/mocks/mock_liiklus.go: pkg/scalers/liiklus/LiiklusService.pb.go mockgen github.com/kedacore/keda/pkg/scalers/liiklus LiiklusServiceClient > pkg/scalers/liiklus/mocks/mock_liiklus.go + +################################################## 
+# Clientset # +################################################## +.PHONY: verify-clientset +verify-clientset: + $(GO_BUILD_VARS) go mod vendor + ./hack/verify-codegen.sh + +.PHONY: generate-clientset +generate-clientset: + $(GO_BUILD_VARS) go mod vendor + ./hack/update-codegen.sh \ No newline at end of file diff --git a/README.md b/README.md index 10f9cfb7506..0bf94a6b736 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,17 @@ +

This branch contains unstable KEDA v2.0.0-alpha1, currently under development

+ +## How can I try KEDA v2 alpha version? +Make sure to remove previous KEDA (including CRD) from the cluster. Switch to the `v2` branch and deploy yaml files: +```bash + git fetch --all + git checkout v2 + kubectl apply -f deploy/crds/keda.sh_scaledobjects_crd.yaml + kubectl apply -f deploy/crds/keda.sh_scaledjobs_crd.yaml + kubectl apply -f deploy/crds/keda.sh_triggerauthentications_crd.yaml + kubectl apply -f deploy/ +``` + +

Kubernetes-based Event Driven Autoscaling

@@ -120,8 +134,9 @@ To be KEDA to be fully operational we need to deploy Metrics Server first. 1. Deploy CRDs and KEDA into `keda` namespace ```bash - kubectl apply -f deploy/crds/keda.k8s.io_scaledobjects_crd.yaml - kubectl apply -f deploy/crds/keda.k8s.io_triggerauthentications_crd.yaml + kubectl apply -f deploy/crds/keda.sh_scaledobjects_crd.yaml + kubectl apply -f deploy/crds/keda.sh_scaledjobs_crd.yaml + kubectl apply -f deploy/crds/keda.sh_triggerauthentications_crd.yaml kubectl apply -f deploy/ ``` 2. Scale down `keda-operator` Deployment diff --git a/build/bin/entrypoint b/build/bin/entrypoint index 90c629001f1..457186bd899 100755 --- a/build/bin/entrypoint +++ b/build/bin/entrypoint @@ -1,11 +1,3 @@ #!/bin/sh -e -# This is documented here: -# https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines - -if ! whoami &>/dev/null; then - if [ -w /etc/passwd ]; then - echo "${USER_NAME:-keda}:x:$(id -u):$(id -g):${USER_NAME:-keda} user:${HOME}:/sbin/nologin" >> /etc/passwd - fi -fi exec ${OPERATOR} $@ diff --git a/build/bin/user_setup b/build/bin/user_setup index 59197998b89..a1f82aaffb8 100755 --- a/build/bin/user_setup +++ b/build/bin/user_setup @@ -2,6 +2,7 @@ set -x # ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) +echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd mkdir -p ${HOME} chown ${USER_UID}:0 ${HOME} chmod ug+rwx ${HOME} diff --git a/cmd/adapter/main.go b/cmd/adapter/main.go index bd2f6944ae6..6011bf0fc59 100644 --- a/cmd/adapter/main.go +++ b/cmd/adapter/main.go @@ -6,18 +6,17 @@ import ( "os" "runtime" - "github.com/kedacore/keda/pkg/handler" + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" kedaprovider "github.com/kedacore/keda/pkg/provider" + "github.com/kedacore/keda/pkg/scaling" "github.com/kedacore/keda/version" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog" - "k8s.io/klog/klogr" 
- - kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" "github.com/operator-framework/operator-sdk/pkg/k8sutil" appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog" + "k8s.io/klog/klogr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -62,7 +61,7 @@ func (a *Adapter) makeProviderOrDie() provider.MetricsProvider { os.Exit(1) } - handler := handler.NewScaleHandler(kubeclient, scheme) + handler := scaling.NewScaleHandler(kubeclient, nil, scheme) namespace, err := k8sutil.GetWatchNamespace() if err != nil { diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 5b3222780d1..ce7a4657361 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -2,14 +2,12 @@ package main import ( "context" + "errors" "flag" "fmt" "os" "runtime" - - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - _ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/client-go/rest" + "strings" "github.com/kedacore/keda/pkg/apis" "github.com/kedacore/keda/pkg/controller" @@ -20,12 +18,15 @@ import ( "github.com/operator-framework/operator-sdk/pkg/leader" "github.com/operator-framework/operator-sdk/pkg/log/zap" "github.com/operator-framework/operator-sdk/pkg/metrics" - "github.com/operator-framework/operator-sdk/pkg/restmapper" sdkVersion "github.com/operator-framework/operator-sdk/version" "github.com/spf13/pflag" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/healthz" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" @@ -41,7 +42,7 @@ var log = logf.Log.WithName("cmd") func printVersion() { log.Info(fmt.Sprintf("KEDA Version: %s", 
version.Version)) - //log.Info(fmt.Sprintf("KEDA Commit: %s", version.GitCommit)) // multiple -ldflags doesn't work with operator-sdk v0.12, let's reenable this for KEDA v2 + log.Info(fmt.Sprintf("KEDA Commit: %s", version.GitCommit)) log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) @@ -91,17 +92,43 @@ func main() { os.Exit(1) } - // Create a new Cmd to provide shared dependencies and start components - mgr, err := manager.New(cfg, manager.Options{ - Namespace: namespace, - MapperProvider: restmapper.NewDynamicRESTMapper, - MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), - }) + // Set default manager options + options := manager.Options{ + Namespace: namespace, + MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), + HealthProbeBindAddress: ":8080", + } + + // Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2) + // Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate + // Also note that you may face performance issues when using this with a high number of namespaces. 
+ // More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder + if strings.Contains(namespace, ",") { + options.Namespace = "" + options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ",")) + } + + // Create a new manager to provide shared dependencies and start components + mgr, err := manager.New(cfg, options) if err != nil { log.Error(err, "") os.Exit(1) } + // Add readiness probe + err = mgr.AddReadyzCheck("ready-ping", healthz.Ping) + if err != nil { + log.Error(err, "Unable to add a readiness check") + os.Exit(1) + } + + // Add liveness probe + err = mgr.AddHealthzCheck("health-ping", healthz.Ping) + if err != nil { + log.Error(err, "Unable to add a health check") + os.Exit(1) + } + log.Info("Registering Components.") // Setup Scheme for all resources @@ -116,7 +143,31 @@ func main() { os.Exit(1) } - if err = serveCRMetrics(cfg); err != nil { + // Add the Metrics Service + addMetrics(ctx, cfg) + + log.Info("Starting the Cmd.") + + // Start the Cmd + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "Manager exited non-zero") + os.Exit(1) + } +} + +// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using +// the Prometheus operator +func addMetrics(ctx context.Context, cfg *rest.Config) { + // Get the namespace the operator is currently deployed in. 
+ operatorNs, err := k8sutil.GetOperatorNamespace() + if err != nil { + if errors.Is(err, k8sutil.ErrRunLocal) { + log.Info("Skipping CR metrics server creation; not running in a cluster.") + return + } + } + + if err := serveCRMetrics(cfg, operatorNs); err != nil { log.Info("Could not generate and serve custom resource metrics", "error", err.Error()) } @@ -125,6 +176,7 @@ func main() { {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, } + // Create Service object to expose the metrics port(s). service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts) if err != nil { @@ -134,7 +186,9 @@ func main() { // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources // necessary to configure Prometheus to scrape metrics from this operator. services := []*v1.Service{service} - _, err = metrics.CreateServiceMonitors(cfg, namespace, services) + + // The ServiceMonitor is created in the same namespace where the operator is deployed + _, err = metrics.CreateServiceMonitors(cfg, operatorNs, services) if err != nil { log.Info("Could not create ServiceMonitor object", "error", err.Error()) // If this operator is deployed to a cluster without the prometheus-operator running, it will return @@ -143,32 +197,26 @@ func main() { log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error()) } } - - log.Info("Starting the Cmd.") - - // Start the Cmd - if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - log.Error(err, "Manager exited non-zero") - os.Exit(1) - } } // serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types. 
// It serves those metrics on "http://metricsHost:operatorMetricsPort". -func serveCRMetrics(cfg *rest.Config) error { - // Below function returns filtered operator/CustomResource specific GVKs. - // For more control override the below GVK list with your own custom logic. +func serveCRMetrics(cfg *rest.Config, operatorNs string) error { + // The function below returns a list of filtered operator/CR specific GVKs. For more control, override the GVK list below + // with your own custom logic. Note that if you are adding third party API schemas, probably you will need to + // customize this implementation to avoid permissions issues. filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme) if err != nil { return err } - // Get the namespace the operator is currently deployed in. - operatorNs, err := k8sutil.GetOperatorNamespace() + + // The metrics will be generated from the namespaces which are returned here. + // NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error. + ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs) if err != nil { return err } - // To generate metrics in other namespaces, add the values below. - ns := []string{operatorNs} + // Generate and serve custom resource specific metrics. 
err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort) if err != nil { diff --git a/deploy/00-namespace.yaml b/deploy/00-namespace.yaml index d9da7d39dd3..84ae1540e91 100644 --- a/deploy/00-namespace.yaml +++ b/deploy/00-namespace.yaml @@ -3,6 +3,6 @@ kind: Namespace metadata: labels: app.kubernetes.io/name: keda - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: keda diff --git a/deploy/01-service_account.yaml b/deploy/01-service_account.yaml index 6e86ff89db0..83d688eec30 100644 --- a/deploy/01-service_account.yaml +++ b/deploy/01-service_account.yaml @@ -3,7 +3,7 @@ kind: ServiceAccount metadata: labels: app.kubernetes.io/name: keda-operator - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: keda-operator namespace: keda diff --git a/deploy/10-cluster_role.yaml b/deploy/10-cluster_role.yaml index 55b844d4635..6c3152478b8 100644 --- a/deploy/10-cluster_role.yaml +++ b/deploy/10-cluster_role.yaml @@ -3,9 +3,8 @@ kind: ClusterRole metadata: labels: app.kubernetes.io/name: keda-operator - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator - creationTimestamp: null name: keda-operator rules: - apiGroups: @@ -28,6 +27,7 @@ rules: - deployments - deployments/finalizers - replicasets + - statefulsets verbs: - '*' - apiGroups: @@ -44,7 +44,7 @@ rules: - get - create - apiGroups: - - keda.k8s.io + - keda.sh resources: - '*' verbs: @@ -55,3 +55,15 @@ rules: - horizontalpodautoscalers verbs: - '*' +- apiGroups: + - "*" + resources: + - "*/scale" + verbs: + - "*" +- apiGroups: + - "*" + resources: + - "*" + verbs: + - "get" \ No newline at end of file diff --git a/deploy/11-role_binding.yaml b/deploy/11-role_binding.yaml index e358ec42997..99d94eadc7a 100644 --- a/deploy/11-role_binding.yaml +++ 
b/deploy/11-role_binding.yaml @@ -3,7 +3,7 @@ kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/name: keda-operator - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: keda-operator roleRef: diff --git a/deploy/12-operator.yaml b/deploy/12-operator.yaml index a8b75217da8..07c7ea193c9 100644 --- a/deploy/12-operator.yaml +++ b/deploy/12-operator.yaml @@ -4,7 +4,7 @@ metadata: labels: app: keda-operator app.kubernetes.io/name: keda-operator - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/component: operator app.kubernetes.io/part-of: keda-operator name: keda-operator @@ -24,13 +24,30 @@ spec: serviceAccountName: keda-operator containers: - name: keda-operator - image: docker.io/kedacore/keda:1.5.0 + image: docker.io/kedacore/keda:2.0.0-alpha1 command: - keda args: - '--zap-level=info' - '--zap-time-encoding=epoch' imagePullPolicy: Always + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1000m + memory: 1000Mi + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 25 + readinessProbe: + httpGet: + path: /readyz + port: 8080 + initialDelaySeconds: 20 env: - name: WATCH_NAMESPACE value: "" diff --git a/deploy/20-metrics-cluster_role.yaml b/deploy/20-metrics-cluster_role.yaml index 93429e85fd8..149493766ed 100644 --- a/deploy/20-metrics-cluster_role.yaml +++ b/deploy/20-metrics-cluster_role.yaml @@ -3,7 +3,7 @@ kind: ClusterRole metadata: labels: app.kubernetes.io/name: keda-external-metrics-reader - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator creationTimestamp: null name: keda-external-metrics-reader diff --git a/deploy/21-metrics-role_binding.yaml b/deploy/21-metrics-role_binding.yaml index a911dba16b1..cc840cad747 100644 --- a/deploy/21-metrics-role_binding.yaml +++ b/deploy/21-metrics-role_binding.yaml @@ -3,7 
+3,7 @@ kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/name: keda-system-auth-delegator - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: keda:system:auth-delegator roleRef: @@ -20,7 +20,7 @@ kind: RoleBinding metadata: labels: app.kubernetes.io/name: keda-auth-reader - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: keda-auth-reader namespace: kube-system @@ -38,7 +38,7 @@ kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/name: keda-hpa-controller-external-metrics - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: keda-hpa-controller-external-metrics roleRef: diff --git a/deploy/22-metrics-deployment.yaml b/deploy/22-metrics-deployment.yaml index f2fca202839..576fbbeb5a0 100644 --- a/deploy/22-metrics-deployment.yaml +++ b/deploy/22-metrics-deployment.yaml @@ -4,7 +4,7 @@ metadata: labels: app: keda-metrics-apiserver app.kubernetes.io/name: keda-metrics-apiserver - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: keda-metrics-apiserver namespace: keda @@ -22,8 +22,27 @@ spec: serviceAccountName: keda-operator containers: - name: keda-metrics-apiserver - image: docker.io/kedacore/keda-metrics-adapter:1.5.0 + image: docker.io/kedacore/keda-metrics-adapter:2.0.0-alpha1 imagePullPolicy: Always + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1000m + memory: 1000Mi + livenessProbe: + httpGet: + scheme: HTTPS + path: /healthz + port: 6443 + initialDelaySeconds: 5 + readinessProbe: + httpGet: + scheme: HTTPS + path: /readyz + port: 6443 + initialDelaySeconds: 5 env: - name: WATCH_NAMESPACE value: "" diff --git a/deploy/23-metrics-service.yaml b/deploy/23-metrics-service.yaml index a56b289242c..72af0497ca8 100644 --- 
a/deploy/23-metrics-service.yaml +++ b/deploy/23-metrics-service.yaml @@ -4,7 +4,7 @@ kind: Service metadata: labels: app.kubernetes.io/name: keda-metrics-apiserver - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: keda-metrics-apiserver namespace: keda diff --git a/deploy/24-metrics-api_service.yaml b/deploy/24-metrics-api_service.yaml index e57bb9e4997..0eebbb4ae97 100644 --- a/deploy/24-metrics-api_service.yaml +++ b/deploy/24-metrics-api_service.yaml @@ -1,9 +1,9 @@ -apiVersion: apiregistration.k8s.io/v1beta1 +apiVersion: apiregistration.k8s.io/v1 kind: APIService metadata: labels: app.kubernetes.io/name: v1beta1.external.metrics.k8s.io - app.kubernetes.io/version: "1.5.0" + app.kubernetes.io/version: "2.0.0-alpha1" app.kubernetes.io/part-of: keda-operator name: v1beta1.external.metrics.k8s.io spec: diff --git a/deploy/crds/keda.k8s.io_scaledobjects_crd.yaml b/deploy/crds/keda.sh_scaledjobs_crd.yaml similarity index 70% rename from deploy/crds/keda.k8s.io_scaledobjects_crd.yaml rename to deploy/crds/keda.sh_scaledjobs_crd.yaml index a254ce9965b..d01e148134b 100644 --- a/deploy/crds/keda.k8s.io_scaledobjects_crd.yaml +++ b/deploy/crds/keda.sh_scaledjobs_crd.yaml @@ -1,45 +1,53 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: scaledobjects.keda.k8s.io + name: scaledjobs.keda.sh spec: additionalPrinterColumns: - - JSONPath: .spec.scaleTargetRef.deploymentName - name: Deployment - type: string - JSONPath: .spec.triggers[*].type name: Triggers type: string + - JSONPath: .spec.triggers[*].authenticationRef.name + name: Authentication + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Active")].status + name: Active + type: string - JSONPath: .metadata.creationTimestamp name: Age type: date - group: keda.k8s.io + group: keda.sh names: - kind: 
ScaledObject - listKind: ScaledObjectList - plural: scaledobjects - singular: scaledobject + kind: ScaledJob + listKind: ScaledJobList + plural: scaledjobs + shortNames: + - sj + singular: scaledjob scope: Namespaced subresources: status: {} validation: openAPIV3Schema: - description: ScaledObject is a specification for a ScaledObject resource + description: ScaledJob is the Schema for the scaledjobs API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: ScaledObjectSpec is the spec for a ScaledObject resource + description: ScaledJobSpec defines the desired state of ScaledJob properties: cooldownPeriod: format: int32 @@ -137,11 +145,11 @@ spec: a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/' properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' type: object spec: description: 'Specification of the desired behavior of the pod. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' properties: activeDeadlineSeconds: description: Optional duration in seconds the pod may be @@ -887,7 +895,7 @@ spec: type: string optional: description: Specify whether the ConfigMap - or it's key must be defined + or its key must be defined type: boolean required: - key @@ -897,7 +905,7 @@ spec: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, - status.hostIP, status.podIP.' + status.hostIP, status.podIP, status.podIPs.' properties: apiVersion: description: Version of the schema the @@ -923,10 +931,14 @@ spec: for volumes, optional for env vars' type: string divisor: + anyOf: + - type: integer + - type: string description: Specifies the output format of the exposed resources, defaults to "1" - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true resource: description: 'Required: resource to select' @@ -951,7 +963,7 @@ spec: type: string optional: description: Specify whether the Secret - or it's key must be defined + or its key must be defined type: boolean required: - key @@ -1127,18 +1139,18 @@ spec: preStop: description: 'PreStop is called immediately before a container is terminated due to an API request - or management event such as liveness probe failure, - preemption, resource contention, etc. The handler - is not called if the container crashes or exits. - The reason for termination is passed to the - handler. 
The Pod''s termination grace period - countdown begins before the PreStop hooked is - executed. Regardless of the outcome of the handler, - the container will eventually terminate within - the Pod''s termination grace period. Other management - of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + or management event such as liveness/startup + probe failure, preemption, resource contention, + etc. The handler is not called if the container + crashes or exits. The reason for termination + is passed to the handler. The Pod''s termination + grace period countdown begins before the PreStop + hooked is executed. Regardless of the outcome + of the handler, the container will eventually + terminate within the Pod''s termination grace + period. Other management of the container blocks + until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: description: One and only one of the following @@ -1325,7 +1337,7 @@ spec: description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for - liveness. Minimum value is 1. + liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -1406,6 +1418,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: description: 'Periodic probe of container service readiness. Container will be removed from service @@ -1500,7 +1516,7 @@ spec: description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for - liveness. Minimum value is 1. + liveness and startup. Minimum value is 1. 
format: int32 type: integer tcpSocket: @@ -1537,13 +1553,21 @@ spec: properties: limits: additionalProperties: - type: string + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object requests: additionalProperties: - type: string + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits @@ -1661,6 +1685,1359 @@ spec: that applies to the container. type: string type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. + Defaults to the user specified in image + metadata if unspecified. May also be set + in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod + has successfully initialized. If specified, no other + probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, + just as if the livenessProbe failed. This can be + used to provide different probe parameters at the + beginning of a Pod''s lifecycle, when it might take + a long time to load data or warm a cache, than during + steady-state operation. This cannot be updated. + This is a beta feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the action + to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If + this is not set, reads from stdin in the container + will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should + close the stdin channel after it has been opened + by a single attach. When stdin is true the stdin + stream will remain open across multiple attach sessions. + If stdinOnce is set to true, stdin is opened on + container start, is empty until the first client + attaches to stdin, and then remains open and accepts + data until the client disconnects, at which time + stdin is closed and remains closed until the container + is restarted. If this flag is false, a container + processes that reads from stdin will never receive + an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to + which the container''s termination message will + be written is mounted into the container''s filesystem. + Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message + should be populated. File will use the contents + of terminationMessagePath to populate the container + status message on both success and failure. 
FallbackToLogsOnError + will use the last chunk of container log output + if the termination message file is empty and the + container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is + smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of + a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). 
+ type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. Behaves similarly to SubPath but + environment variable references $(VAR_NAME) + are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not + specified, the container runtime's default will + be used, which might be configured in the container + image. Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters + specified here will be merged to the generated DNS configuration + based on DNSPolicy. + properties: + nameservers: + description: A list of DNS name server IP addresses. + This will be appended to the base nameservers generated + from DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will + be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options + given in Options will override those that appear in + the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name + lookup. This will be appended to the base search paths + generated from DNSPolicy. Duplicated search paths + will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', + 'Default' or 'None'. 
DNS parameters given in DNSConfig + will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have + to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information + about services should be injected into pod''s environment + variables, matching the syntax of Docker links. Optional: + Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. + Ephemeral containers may be run in an existing pod to + perform user-initiated actions such as debugging. This + list cannot be specified when creating a pod, and it cannot + be modified by updating the pod spec. In order to add + an ephemeral container to an existing pod, use the pod's + ephemeralcontainers subresource. This field is alpha-level + and is only honored by servers that enable the EphemeralContainers + feature. + items: + description: An EphemeralContainer is a container that + may be added temporarily to an existing pod for user-initiated + activities such as debugging. Ephemeral containers have + no resource or scheduling guarantees, and they will + not be restarted when they exit or when a pod is removed + or restarted. If an ephemeral container causes a pod + to exceed its resource allocation, the pod may be evicted. + Ephemeral containers may not be added by directly updating + the pod spec. They must be added via the pod's ephemeralcontainers + subresource, and they will appear in the pod spec once + added. This is an alpha feature enabled by the EphemeralContainers + feature flag. + properties: + args: + description: 'Arguments to the entrypoint. The docker + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. 
+ The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The docker image''s ENTRYPOINT is used + if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. + If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) + syntax can be escaped with a double $$, ie: $$(VAR_NAME). + Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set + in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previous defined environment + variables in the container and any service + environment variables. If a variable cannot + be resolved, the reference in the input string + will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, + status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. 
Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container + is starting. When a key exists in multiple sources, + the value associated with the last source will take + precedence. Values defined by an Env with a duplicate + key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag + is specified, or IfNotPresent otherwise. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: 'PostStart is called immediately + after a container is created. If the handler + fails, the container is terminated and restarted + according to its restart policy. Other management + of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the + action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action + involving a TCP port. TCP hooks not yet + supported TODO: implement a realistic TCP + lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before + a container is terminated due to an API request + or management event such as liveness/startup + probe failure, preemption, resource contention, + etc. The handler is not called if the container + crashes or exits. The reason for termination + is passed to the handler. The Pod''s termination + grace period countdown begins before the PreStop + hooked is executed. Regardless of the outcome + of the handler, the container will eventually + terminate within the Pod''s termination grace + period. Other management of the container blocks + until the hook completes or until the termination + grace period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the + action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action + involving a TCP port. 
TCP hooks not yet + supported TODO: implement a realistic TCP + lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the action + to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified + as a DNS_LABEL. This name must be unique among all + containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: Number of port to expose on the + pod's IP address. This must be a valid port + number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the + host. If specified, this must be a valid port + number, 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port + in a pod must have a unique name. Name for + the port that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, + TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the action + to take. 
+ properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral + containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: SecurityContext is not allowed for ephemeral + containers. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the + container process. AllowPrivilegeEscalation + is true always when the container is: 1) run + as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when + running containers. Defaults to the default + set of capabilities granted by the container + runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. + Processes in privileged containers are essentially + equivalent to root on the host. Defaults to + false. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. 
The default + is DefaultProcMount which uses the container + runtime defaults for readonly paths and masked + paths. This requires the ProcMountType feature + flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of + the container process. Uses runtime default + if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must + run as a non-root user. If true, the Kubelet + will validate the image at runtime to ensure + that it does not run as UID 0 (root) and fail + to start the container if it does. If unset + or false, no such validation will be performed. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of + the container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied + to the container. If unspecified, the container + runtime will allocate a random SELinux context + for each container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. 
+ type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. + Defaults to the user specified in image + metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the action + to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object stdin: description: Whether this container should allocate @@ -1682,6 +3059,15 @@ spec: processes that reads from stdin will never receive an EOF. Default is false type: boolean + targetContainerName: + description: If set, the name of the container from + PodSpec that this ephemeral container targets. The + ephemeral container will be run in the namespaces + (IPC, PID, etc) of this container. If not set then + the ephemeral container is run in whatever namespaces + are shared for the pod. Note that the container + runtime must support this feature. + type: string terminationMessagePath: description: 'Optional: Path at which the file to which the container''s termination message will @@ -1711,7 +3097,7 @@ spec: type: boolean volumeDevices: description: volumeDevices is the list of block devices - to be used by the container. This is a beta feature. + to be used by the container. items: description: volumeDevice describes a mapping of a raw block device within a container. @@ -1768,8 +3154,7 @@ spec: environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). 
SubPathExpr - and SubPath are mutually exclusive. This field - is alpha in 1.14. + and SubPath are mutually exclusive. type: string required: - mountPath @@ -1786,58 +3171,6 @@ spec: - name type: object type: array - dnsConfig: - description: Specifies the DNS parameters of a pod. Parameters - specified here will be merged to the generated DNS configuration - based on DNSPolicy. - properties: - nameservers: - description: A list of DNS name server IP addresses. - This will be appended to the base nameservers generated - from DNSPolicy. Duplicated nameservers will be removed. - items: - type: string - type: array - options: - description: A list of DNS resolver options. This will - be merged with the base options generated from DNSPolicy. - Duplicated entries will be removed. Resolution options - given in Options will override those that appear in - the base DNSPolicy. - items: - description: PodDNSConfigOption defines DNS resolver - options of a pod. - properties: - name: - description: Required. - type: string - value: - type: string - type: object - type: array - searches: - description: A list of DNS search domains for host-name - lookup. This will be appended to the base search paths - generated from DNSPolicy. Duplicated search paths - will be removed. - items: - type: string - type: array - type: object - dnsPolicy: - description: Set DNS policy for the pod. Defaults to "ClusterFirst". - Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', - 'Default' or 'None'. DNS parameters given in DNSConfig - will be merged with the policy selected with DNSPolicy. - To have DNS options set along with hostNetwork, you have - to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. - type: string - enableServiceLinks: - description: 'EnableServiceLinks indicates whether information - about services should be injected into pod''s environment - variables, matching the syntax of Docker links. Optional: - Defaults to true.' 
- type: boolean hostAliases: description: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if @@ -1902,8 +3235,8 @@ spec: to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness - probes, or Liveness probes. The resourceRequirements of - an init container are taken into account during scheduling + probes, Liveness probes, or Startup probes. The resourceRequirements + of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers @@ -1981,7 +3314,7 @@ spec: type: string optional: description: Specify whether the ConfigMap - or it's key must be defined + or its key must be defined type: boolean required: - key @@ -1991,7 +3324,7 @@ spec: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, - status.hostIP, status.podIP.' + status.hostIP, status.podIP, status.podIPs.' 
properties: apiVersion: description: Version of the schema the @@ -2017,10 +3350,14 @@ spec: for volumes, optional for env vars' type: string divisor: + anyOf: + - type: integer + - type: string description: Specifies the output format of the exposed resources, defaults to "1" - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true resource: description: 'Required: resource to select' @@ -2045,7 +3382,7 @@ spec: type: string optional: description: Specify whether the Secret - or it's key must be defined + or its key must be defined type: boolean required: - key @@ -2221,18 +3558,18 @@ spec: preStop: description: 'PreStop is called immediately before a container is terminated due to an API request - or management event such as liveness probe failure, - preemption, resource contention, etc. The handler - is not called if the container crashes or exits. - The reason for termination is passed to the - handler. The Pod''s termination grace period - countdown begins before the PreStop hooked is - executed. Regardless of the outcome of the handler, - the container will eventually terminate within - the Pod''s termination grace period. Other management - of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + or management event such as liveness/startup + probe failure, preemption, resource contention, + etc. The handler is not called if the container + crashes or exits. The reason for termination + is passed to the handler. The Pod''s termination + grace period countdown begins before the PreStop + hooked is executed. Regardless of the outcome + of the handler, the container will eventually + terminate within the Pod''s termination grace + period. 
Other management of the container blocks + until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: description: One and only one of the following @@ -2419,7 +3756,7 @@ spec: description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for - liveness. Minimum value is 1. + liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -2500,6 +3837,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: description: 'Periodic probe of container service readiness. Container will be removed from service @@ -2594,7 +3935,7 @@ spec: description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for - liveness. Minimum value is 1. + liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -2631,13 +3972,21 @@ spec: properties: limits: additionalProperties: - type: string + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object requests: additionalProperties: - type: string + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits @@ -2755,6 +4104,167 @@ spec: that applies to the container. type: string type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. + Defaults to the user specified in image + metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod + has successfully initialized. If specified, no other + probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, + just as if the livenessProbe failed. This can be + used to provide different probe parameters at the + beginning of a Pod''s lifecycle, when it might take + a long time to load data or warm a cache, than during + steady-state operation. This cannot be updated. + This is a beta feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the action + to take. 
+ properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to + access on the container. Number must be + in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object stdin: description: Whether this container should allocate @@ -2805,7 +4315,7 @@ spec: type: boolean volumeDevices: description: volumeDevices is the list of block devices - to be used by the container. This is a beta feature. + to be used by the container. items: description: volumeDevice describes a mapping of a raw block device within a container. @@ -2862,8 +4372,7 @@ spec: environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr - and SubPath are mutually exclusive. This field - is alpha in 1.14. + and SubPath are mutually exclusive. 
type: string required: - mountPath @@ -2894,6 +4403,35 @@ spec: a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead + associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by + the RuntimeClass admission controller. If the RuntimeClass + admission controller is enabled, overhead must not be + set in Pod create requests. The RuntimeClass admission + controller will reject Pod create requests which have + the overhead already set. If RuntimeClass is configured + and selected in the PodSpec, Overhead will be set to the + value defined in the corresponding RuntimeClass, otherwise + it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + This field is alpha-level as of Kubernetes v1.16, and + is only honored by servers that enable the PodOverhead + feature.' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting + pods with lower priority. One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. This field + is alpha-level and is only honored by servers that enable + the NonPreemptingPriority feature. + type: string priority: description: The priority value. Various system components use this field to find the priority of the pod. When Priority @@ -2943,7 +4481,7 @@ spec: empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md - This is an alpha feature and may change in the future.' + This is a beta feature as of Kubernetes v1.14.' type: string schedulerName: description: If specified, the pod will be dispatched by @@ -2967,6 +4505,16 @@ spec: permissions of any volume." format: int64 type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of + changing ownership and permission of the volume before + being exposed inside Pod. This field will only apply + to volume types which support fsGroup based ownership(and + permissions). It will have no effect on ephemeral + volume types such as: secret, configmaps and emptydir. + Valid values are "OnRootMismatch" and "Always". If + not specified defaults to "Always".' + type: string runAsGroup: description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be @@ -3047,6 +4595,32 @@ spec: - value type: object type: array + windowsOptions: + description: The Windows specific settings applied to + all containers. If unspecified, the options within + a container's SecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA + admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec + named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the + entrypoint of the container process. Defaults + to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. 
If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + type: string + type: object type: object serviceAccount: description: 'DeprecatedServiceAccount is a depreciated @@ -3063,9 +4637,7 @@ spec: will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace - cannot both be set. Optional: Default to false. This field - is beta-level and may be disabled with the PodShareProcessNamespace - feature.' + cannot both be set. Optional: Default to false.' type: boolean subdomain: description: If specified, the fully qualified Pod hostname @@ -3129,6 +4701,119 @@ spec: type: string type: object type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how a group + of pods ought to spread across topology domains. Scheduler + will schedule pods in a way which abides by the constraints. + This field is only honored by clusters that enable the + EvenPodsSpread feature. All topologySpreadConstraints + are ANDed. + items: + description: TopologySpreadConstraint specifies how to + spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching + pods. Pods that match this label selector are counted + to determine the number of pods in their corresponding + topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. 
Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which + pods may be unevenly distributed. It''s the maximum + permitted difference between the number of matching + pods in any two topology domains of a given topology + type. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector + spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | + - if MaxSkew is 1, incoming pod can only be scheduled + to zone3 to become 1/1/1; scheduling it onto zone1(zone2) + would make the ActualSkew(2-0) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can + be scheduled onto any zone. It''s a required field. + Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. + Nodes that have a label with this key and identical + values are considered to be in the same topology. + We consider each as a "bucket", and + try to put balanced number of pods into each bucket. + It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal + with a pod if it doesn''t satisfy the spread constraint. 
+ - DoNotSchedule (default) tells the scheduler not + to schedule it - ScheduleAnyway tells the scheduler + to still schedule it It''s considered as "Unsatisfiable" + if and only if placing incoming pod on any topology + violates "MaxSkew". For example, in a 3-zone cluster, + MaxSkew is set to 1, and pods with the same labelSelector + spread as 3/1/1: | zone1 | zone2 | zone3 | | P P + P | P | P | If WhenUnsatisfiable is set + to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) + on zone2(zone3) satisfies MaxSkew(1). In other words, + the cluster can still be imbalanced, but scheduler + won''t make it *more* imbalanced. It''s a required + field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map volumes: description: 'List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' @@ -3237,7 +4922,7 @@ spec: properties: monitors: description: 'Required: Monitors is a collection - of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it' + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' items: type: string type: array @@ -3248,17 +4933,17 @@ spec: readOnly: description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting - in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it' + in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: boolean secretFile: description: 'Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it' + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string secretRef: description: 'Optional: SecretRef is reference to the authentication secret for User, default - is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it' + is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' properties: name: description: 'Name of the referent. More info: @@ -3269,27 +4954,27 @@ spec: type: object user: description: 'Optional: User is the rados user - name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it' + name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string required: - monitors type: object cinder: description: 'Cinder represents a cinder volume attached - and mounted on kubelets host machine More info: - https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md' + and mounted on kubelets host machine. More info: + https://examples.k8s.io/mysql-cinder-pd/README.md' properties: fsType: description: 'Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: - https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md' + https://examples.k8s.io/mysql-cinder-pd/README.md' type: string readOnly: description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting - in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md' + in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: boolean secretRef: description: 'Optional: points to a secret object @@ -3304,7 +4989,7 @@ spec: type: object volumeID: description: 'volume id used to identify the volume - in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md' + in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: string required: - volumeID @@ -3371,7 +5056,7 @@ spec: type: string optional: description: Specify whether the ConfigMap or - it's keys must be defined + its keys must be defined type: boolean type: object csi: @@ -3491,10 +5176,14 @@ spec: for volumes, optional for env vars' type: string divisor: + anyOf: + - type: integer + - type: string description: Specifies the output format of the exposed resources, defaults to "1" - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true resource: description: 'Required: resource to select' @@ -3519,6 +5208,9 @@ spec: info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string sizeLimit: + anyOf: + - type: integer + - type: string description: 'Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum @@ -3527,7 +5219,8 @@ spec: here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true type: object fc: description: FC represents a Fibre Channel resource @@ -3690,21 +5383,21 @@ spec: glusterfs: description: 'Glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. 
More - info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md' + info: https://examples.k8s.io/volumes/glusterfs/README.md' properties: endpoints: description: 'EndpointsName is the endpoint name that details Glusterfs topology. More info: - https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod' + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string path: description: 'Path is the Glusterfs volume path. - More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod' + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string readOnly: description: 'ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod' + Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: boolean required: - endpoints @@ -3736,7 +5429,7 @@ spec: iscsi: description: 'ISCSI represents an ISCSI Disk resource that is attached to a kubelet''s host machine and - then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md' + then exposed to the pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' properties: chapAuthDiscovery: description: whether support iSCSI Discovery CHAP @@ -3968,7 +5661,7 @@ spec: type: string optional: description: Specify whether the ConfigMap - or it's keys must be defined + or its keys must be defined type: boolean type: object downwardAPI: @@ -4039,10 +5732,14 @@ spec: for env vars' type: string divisor: + anyOf: + - type: integer + - type: string description: Specifies the output format of the exposed resources, defaults to "1" - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true resource: description: 'Required: resource to select' @@ -4198,7 +5895,7 @@ spec: rbd: description: 'RBD represents a Rados Block Device mount on the host that shares a pod''s lifetime. - More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md' + More info: https://examples.k8s.io/volumes/rbd/README.md' properties: fsType: description: 'Filesystem type of the volume that @@ -4212,32 +5909,32 @@ spec: type: string image: description: 'The rados image name. More info: - https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it' + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string keyring: description: 'Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More - info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it' + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string monitors: description: 'A collection of Ceph monitors. More - info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it' + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' items: type: string type: array pool: description: 'The rados pool name. Default is - rbd. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it' + rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string readOnly: description: 'ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. - More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it' + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: boolean secretRef: description: 'SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. - Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it' + Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: name: description: 'Name of the referent. More info: @@ -4248,7 +5945,7 @@ spec: type: object user: description: 'The rados user name. Default is - admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it' + admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string required: - image @@ -4371,7 +6068,7 @@ spec: type: object type: array optional: - description: Specify whether the Secret or it's + description: Specify whether the Secret or its keys must be defined type: boolean secretName: @@ -4480,21 +6177,6 @@ spec: pollingInterval: format: int32 type: integer - scaleTargetRef: - description: ObjectReference holds the a reference to the deployment - this ScaledObject applies - properties: - containerName: - type: string - deploymentName: - type: string - required: - - deploymentName - type: object - scaleType: - description: ScaledObjectScaleType distinguish between Deployment based - and K8s Jobs - type: string triggers: items: description: ScaleTriggers reference the scaler that will be used @@ -4522,21 +6204,37 @@ spec: type: object type: array required: + - jobTargetRef - triggers type: object status: - description: ScaledObjectStatus is the status for a ScaledObject resource + description: ScaledJobStatus defines the observed state of ScaledJob properties: - externalMetricNames: + conditions: items: - type: string + properties: + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: Type of condition + type: string + required: + - status + - type + type: object type: array lastActiveTime: format: date-time type: string type: object - required: - - spec type: object version: v1alpha1 versions: diff --git a/deploy/crds/keda.sh_scaledobjects_crd.yaml b/deploy/crds/keda.sh_scaledobjects_crd.yaml new file mode 100644 index 00000000000..5ede45ce870 --- /dev/null +++ b/deploy/crds/keda.sh_scaledobjects_crd.yaml @@ -0,0 +1,351 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: scaledobjects.keda.sh +spec: + additionalPrinterColumns: + - JSONPath: .status.scaleTargetKind + name: ScaleTargetKind + type: string + - JSONPath: .spec.scaleTargetRef.name + name: ScaleTargetName + type: string + - JSONPath: .spec.triggers[*].type + name: Triggers + type: string + - JSONPath: .spec.triggers[*].authenticationRef.name + name: Authentication + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Active")].status + name: Active + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: keda.sh + names: + kind: ScaledObject + listKind: ScaledObjectList + plural: scaledobjects + shortNames: + - so + singular: scaledobject + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: ScaledObject is a specification for a ScaledObject resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ScaledObjectSpec is the spec for a ScaledObject resource + properties: + advanced: + properties: + horizontalPodAutoscalerConfig: + properties: + behavior: + description: HorizontalPodAutoscalerBehavior configures the + scaling behavior of the target in both Up and Down directions + (scaleUp and scaleDown fields respectively). + properties: + scaleDown: + description: scaleDown is scaling policy for scaling Down. + If not set, the default value is to allow to scale down + to minReplicas pods, with a 300 second stabilization window + (i.e., the highest recommendation for the last 300sec + is used). + properties: + policies: + description: policies is a list of potential scaling + polices which can be used during scaling. At least + one policy must be specified, otherwise the HPAScalingRules + will be discarded as invalid + items: + description: HPAScalingPolicy is a single policy which + must hold true for a specified past interval. + properties: + periodSeconds: + description: PeriodSeconds specifies the window + of time for which the policy should hold true. + PeriodSeconds must be greater than zero and + less than or equal to 1800 (30 min). + format: int32 + type: integer + type: + description: Type is used to specify the scaling + policy. + type: string + value: + description: Value contains the amount of change + which is permitted by the policy. It must be + greater than zero + format: int32 + type: integer + required: + - periodSeconds + - type + - value + type: object + type: array + selectPolicy: + description: selectPolicy is used to specify which policy + should be used. If not set, the default value MaxPolicySelect + is used. 
+ type: string + stabilizationWindowSeconds: + description: 'StabilizationWindowSeconds is the number + of seconds for which past recommendations should be + considered while scaling up or scaling down. StabilizationWindowSeconds + must be greater than or equal to zero and less than + or equal to 3600 (one hour). If not set, use the default + values: - For scale up: 0 (i.e. no stabilization is + done). - For scale down: 300 (i.e. the stabilization + window is 300 seconds long).' + format: int32 + type: integer + type: object + scaleUp: + description: 'scaleUp is scaling policy for scaling Up. + If not set, the default value is the higher of: * increase + no more than 4 pods per 60 seconds * double the number + of pods per 60 seconds No stabilization is used.' + properties: + policies: + description: policies is a list of potential scaling + polices which can be used during scaling. At least + one policy must be specified, otherwise the HPAScalingRules + will be discarded as invalid + items: + description: HPAScalingPolicy is a single policy which + must hold true for a specified past interval. + properties: + periodSeconds: + description: PeriodSeconds specifies the window + of time for which the policy should hold true. + PeriodSeconds must be greater than zero and + less than or equal to 1800 (30 min). + format: int32 + type: integer + type: + description: Type is used to specify the scaling + policy. + type: string + value: + description: Value contains the amount of change + which is permitted by the policy. It must be + greater than zero + format: int32 + type: integer + required: + - periodSeconds + - type + - value + type: object + type: array + selectPolicy: + description: selectPolicy is used to specify which policy + should be used. If not set, the default value MaxPolicySelect + is used. 
+ type: string + stabilizationWindowSeconds: + description: 'StabilizationWindowSeconds is the number + of seconds for which past recommendations should be + considered while scaling up or scaling down. StabilizationWindowSeconds + must be greater than or equal to zero and less than + or equal to 3600 (one hour). If not set, use the default + values: - For scale up: 0 (i.e. no stabilization is + done). - For scale down: 300 (i.e. the stabilization + window is 300 seconds long).' + format: int32 + type: integer + type: object + type: object + resourceMetrics: + items: + description: ResourceMetricSource indicates how to scale on + a resource metric known to Kubernetes, as specified in requests + and limits, describing each pod in the current scale target + (e.g. CPU or memory). The values will be averaged together + before being compared to the target. Such metrics are built + in to Kubernetes, and have special scaling options on top + of those available to normal per-pod metrics using the "pods" + source. Only one "target" type should be set. + properties: + name: + description: name is the name of the resource in question. + type: string + target: + description: target specifies the target value for the + given metric + properties: + averageUtilization: + description: averageUtilization is the target value + of the average of the resource metric across all + relevant pods, represented as a percentage of the + requested value of the resource for the pods. 
Currently + only valid for Resource metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value of the + average of the metric across all relevant pods (as + a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric type + is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the metric + (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: array + type: object + type: object + cooldownPeriod: + format: int32 + type: integer + maxReplicaCount: + format: int32 + type: integer + minReplicaCount: + format: int32 + type: integer + pollingInterval: + format: int32 + type: integer + scaleTargetRef: + description: ScaleTarget holds the a reference to the scale target Object + properties: + apiVersion: + type: string + containerName: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + triggers: + items: + description: ScaleTriggers reference the scaler that will be used + properties: + authenticationRef: + description: ScaledObjectAuthRef points to the TriggerAuthentication + object that is used to authenticate the scaler with the environment + properties: + name: + type: string + required: + - name + type: object + metadata: + additionalProperties: + type: string + type: object + name: + type: string + type: + type: string + required: + - metadata + - type + type: object + type: array + required: + - scaleTargetRef + - triggers + type: object + status: + 
description: ScaledObjectStatus is the status for a ScaledObject resource + properties: + conditions: + items: + properties: + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition + type: string + required: + - status + - type + type: object + type: array + externalMetricNames: + items: + type: string + type: array + lastActiveTime: + format: date-time + type: string + scaleTargetGVKR: + description: GroupVersionKindResource provides unified structure for + schema.GroupVersionKind and Resource + properties: + group: + type: string + kind: + type: string + resource: + type: string + version: + type: string + required: + - group + - kind + - resource + - version + type: object + scaleTargetKind: + type: string + type: object + required: + - spec + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/deploy/crds/keda.k8s.io_triggerauthentications_crd.yaml b/deploy/crds/keda.sh_triggerauthentications_crd.yaml similarity index 86% rename from deploy/crds/keda.k8s.io_triggerauthentications_crd.yaml rename to deploy/crds/keda.sh_triggerauthentications_crd.yaml index 2279dbdee8a..fa473b13d46 100644 --- a/deploy/crds/keda.k8s.io_triggerauthentications_crd.yaml +++ b/deploy/crds/keda.sh_triggerauthentications_crd.yaml @@ -1,15 +1,29 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: triggerauthentications.keda.k8s.io + name: triggerauthentications.keda.sh spec: - group: keda.k8s.io + additionalPrinterColumns: + - JSONPath: .spec.podIdentity.provider + name: PodIdentity + type: string + - JSONPath: .spec.secretTargetRef[*].name + name: Secret + type: string + - JSONPath: .spec.env[*].name + name: Env + type: 
string + group: keda.sh names: kind: TriggerAuthentication listKind: TriggerAuthenticationList plural: triggerauthentications + shortNames: + - ta + - triggerauth singular: triggerauthentication scope: Namespaced + subresources: {} validation: openAPIV3Schema: description: TriggerAuthentication defines how a trigger can authenticate @@ -17,12 +31,12 @@ spec: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -32,7 +46,7 @@ spec: env: items: description: AuthEnvironment is used to authenticate using environment - variables in the destination deployment spec + variables in the destination ScaleTarget spec properties: containerName: type: string diff --git a/deploy/crds/keda.sh_v1alpha1_scaledjob_cr.yaml b/deploy/crds/keda.sh_v1alpha1_scaledjob_cr.yaml new file mode 100644 index 00000000000..717f0c57894 --- /dev/null +++ b/deploy/crds/keda.sh_v1alpha1_scaledjob_cr.yaml @@ -0,0 +1,6 @@ +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: example-scaledjob +spec: + # Add fields here diff --git a/deploy/crds/keda.k8s.io_v1alpha1_scaledobject_cr.yaml b/deploy/crds/keda.sh_v1alpha1_scaledobject_cr.yaml similarity index 72% rename from deploy/crds/keda.k8s.io_v1alpha1_scaledobject_cr.yaml rename to deploy/crds/keda.sh_v1alpha1_scaledobject_cr.yaml index 689b910d0db..ae0815a788f 100644 --- a/deploy/crds/keda.k8s.io_v1alpha1_scaledobject_cr.yaml +++ b/deploy/crds/keda.sh_v1alpha1_scaledobject_cr.yaml @@ -1,4 +1,4 @@ -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: example-scaledobject diff --git a/deploy/crds/keda.k8s.io_v1alpha1_triggerauthentication_cr.yaml b/deploy/crds/keda.sh_v1alpha1_triggerauthentication_cr.yaml similarity index 76% rename from deploy/crds/keda.k8s.io_v1alpha1_triggerauthentication_cr.yaml rename to deploy/crds/keda.sh_v1alpha1_triggerauthentication_cr.yaml index 079a70100ad..17a49887f18 100644 --- a/deploy/crds/keda.k8s.io_v1alpha1_triggerauthentication_cr.yaml +++ b/deploy/crds/keda.sh_v1alpha1_triggerauthentication_cr.yaml @@ -1,4 +1,4 @@ -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: TriggerAuthentication metadata: name: example-triggerauthentication diff --git a/go.mod b/go.mod index bd718d1b96c..da9702259ab 100644 --- 
a/go.mod +++ b/go.mod @@ -2,80 +2,67 @@ module github.com/kedacore/keda go 1.13 -// Required deps for operator-sdk v0.11.0 <-> kubernetes-incubator/custom-metrics-apiserver on kubernetes-1.14.1 -replace ( - github.com/kubernetes-incubator/custom-metrics-apiserver => github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20190703094830-abe433176c52 - github.com/prometheus/client_golang => github.com/prometheus/client_golang v0.9.2 - github.com/ugorji/go => github.com/ugorji/go v1.1.7 - - k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 - sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2 - -) - -// Pinned to kubernetes-1.14.1 -replace ( - k8s.io/api => k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 - k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d - k8s.io/apiserver => k8s.io/apiserver v0.0.0-20190409021813-1ec86e4da56c - k8s.io/client-go => k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible - k8s.io/cloud-provider => k8s.io/cloud-provider v0.0.0-20190409023720-1bc0c81fa51d - k8s.io/metrics => k8s.io/metrics v0.0.0-20190409022812-850dadb8b49c -) - -replace ( - github.com/coreos/prometheus-operator => github.com/coreos/prometheus-operator v0.31.1 - // Pinned to v2.10.0 (kubernetes-1.14.1) so https://proxy.golang.org can - // resolve it correctly. 
- github.com/prometheus/prometheus => github.com/prometheus/prometheus v1.8.2-0.20190525122359-d20e84d0fb64 - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.2.2 -) - -replace github.com/operator-framework/operator-sdk => github.com/operator-framework/operator-sdk v0.11.0 - require ( - cloud.google.com/go v0.46.3 - github.com/Azure/azure-amqp-common-go/v2 v2.1.0 + cloud.google.com/go v0.55.0 + github.com/Azure/azure-amqp-common-go/v3 v3.0.0 github.com/Azure/azure-event-hubs-go v1.3.1 - github.com/Azure/azure-sdk-for-go v30.1.0+incompatible - github.com/Azure/azure-service-bus-go v0.9.1 + github.com/Azure/azure-sdk-for-go v41.1.0+incompatible + github.com/Azure/azure-service-bus-go v0.10.2 github.com/Azure/azure-storage-blob-go v0.8.0 - github.com/Azure/azure-storage-queue-go v0.0.0-20190416192124-a17745f1cdbf - github.com/Azure/go-autorest v12.0.0+incompatible - github.com/Huawei/gophercloud v0.0.0-20190806033045-3f2c8f6aa160 - github.com/Shopify/sarama v1.23.1 - github.com/aws/aws-sdk-go v1.25.6 + github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd + github.com/Azure/go-autorest/autorest v0.10.0 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 + github.com/Huawei/gophercloud v1.0.21 + github.com/Shopify/sarama v1.26.4 + github.com/aws/aws-sdk-go v1.32.3 + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/go-logr/logr v0.1.0 - github.com/go-openapi/spec v0.19.3 - github.com/go-redis/redis v6.15.5+incompatible - github.com/go-sql-driver/mysql v1.4.1 - github.com/golang/mock v1.3.1 - github.com/golang/protobuf v1.3.2 + github.com/go-openapi/spec v0.19.7 + github.com/go-redis/redis v6.15.8+incompatible + github.com/go-sql-driver/mysql v1.5.0 + github.com/golang/mock v1.4.3 + github.com/golang/protobuf v1.3.5 + github.com/gorilla/websocket v1.4.1 // indirect github.com/hashicorp/vault/api v1.0.4 - github.com/imdario/mergo v0.3.8 - 
github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20190918110929-3d9be26a50eb + github.com/imdario/mergo v0.3.9 + github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20200323093244-5046ce1afe6b github.com/lib/pq v1.3.0 - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/operator-framework/operator-sdk v0.0.0-00010101000000-000000000000 - github.com/pkg/errors v0.8.1 + github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 + github.com/operator-framework/operator-sdk v0.18.1 + github.com/pkg/errors v0.9.1 github.com/robfig/cron/v3 v3.0.1 github.com/spf13/pflag v1.0.5 - github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 - github.com/stretchr/testify v1.4.0 + github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 + github.com/stretchr/testify v1.5.1 + github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc // indirect github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c - google.golang.org/api v0.10.0 - google.golang.org/genproto v0.0.0-20191002211648-c459b9ce5143 - google.golang.org/grpc v1.24.0 - gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect - k8s.io/api v0.0.0-20190918155943-95b840bb6a1f - k8s.io/apimachinery v0.0.0-20191014065749-fb3eea214746 - k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible - k8s.io/component-base v0.0.0-20191014071552-ca590c444ad5 // indirect + google.golang.org/api v0.20.0 + google.golang.org/genproto v0.0.0-20200326112834-f447254575fd + google.golang.org/grpc v1.28.0 + k8s.io/api v0.18.2 + k8s.io/apimachinery v0.18.2 + k8s.io/client-go v12.0.0+incompatible + k8s.io/code-generator v0.18.2 k8s.io/klog v1.0.0 - k8s.io/kube-openapi v0.0.0-20190401085232-94e1e7b7574c - k8s.io/metrics v0.0.0-00010101000000-000000000000 - sigs.k8s.io/controller-runtime v0.2.0 - sigs.k8s.io/structured-merge-diff v0.0.0-20191009170950-ae447d53f5c3 // indirect + k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c + k8s.io/metrics 
v0.18.2 + pack.ag/amqp v0.12.5 // indirect + sigs.k8s.io/controller-runtime v0.6.0 +) + +// Need to use this until this PR with k8s 1.18 is merged https://github.com/kubernetes-sigs/custom-metrics-apiserver/pull/66 +replace github.com/kubernetes-incubator/custom-metrics-apiserver => github.com/zroubalik/custom-metrics-apiserver v0.0.0-20200504115811-b4bb20049e83 +replace ( + github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible // Required by OLM + k8s.io/apiserver => k8s.io/apiserver v0.18.2 // Required by kubernetes-incubator/custom-metrics-apiserver + k8s.io/client-go => k8s.io/client-go v0.18.2 +) + +// Required to resolve go/grpc issues +replace ( + cloud.google.com/go => cloud.google.com/go v0.46.3 + google.golang.org/api => google.golang.org/api v0.10.0 + google.golang.org/genproto => google.golang.org/genproto v0.0.0-20191002211648-c459b9ce5143 + google.golang.org/grpc => google.golang.org/grpc v1.24.0 ) diff --git a/go.sum b/go.sum index e11b3820caa..b949de48dd3 100644 --- a/go.sum +++ b/go.sum @@ -1,194 +1,320 @@ -bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c h1:t+Ra932MCC0eeyD/vigXqMbZTzgZjd4JOfBJWC6VSMI= -bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c/go.mod h1:1vhO7Mn/FZMgOgDVGLy5X1mE6rq1HbkBdkF/yj8zkcg= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +bazil.org/fuse 
v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ= -contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= +contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/azure-amqp-common-go v1.1.4 h1:DmPXxmLZwi/71CgRTZIKR6yiKEW3eC42S4gSBhfG7y0= github.com/Azure/azure-amqp-common-go v1.1.4/go.mod h1:FhZtXirFANw40UXI2ntweO+VOkfaw8s6vZxUiRhLYW8= -github.com/Azure/azure-amqp-common-go/v2 v2.1.0 h1:+QbFgmWCnPzdaRMfsI0Yb6GrRdBj5jVL8N3EXuEUcBQ= -github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0 
h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= github.com/Azure/azure-event-hubs-go v1.3.1 h1:vKw7tLOFJ8kVMkhNvOXZWz+3purRQ/WTe60+bavZ5qc= github.com/Azure/azure-event-hubs-go v1.3.1/go.mod h1:me2m3+0WC7G7JRBTWI5SQ81s2TYyOqgV3JIpYg86jZA= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v30.1.0+incompatible h1:HyYPft8wXpxMd0kfLtXo6etWcO+XuPbLkcgx9g2cqxU= -github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-service-bus-go v0.9.1 h1:G1qBLQvHCFDv9pcpgwgFkspzvnGknJRR0PYJ9ytY/JA= -github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= -github.com/Azure/azure-storage-blob-go v0.0.0-20181022225951-5152f14ace1c/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-sdk-for-go v36.1.0+incompatible h1:smHlbChr/JDmsyUqELZXLs0YIgpXecIGdUibuc2983s= +github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v41.1.0+incompatible h1:AkS9XaeC8TDd0W0UiJRnEcYEHBO6xzILqqswHiNlSZQ= +github.com/Azure/azure-sdk-for-go v41.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-service-bus-go v0.10.2 h1:yrGKscDQqNXnFXzYfukW34p8Jr4LmzaAOgYaQKEsipM= +github.com/Azure/azure-service-bus-go v0.10.2/go.mod h1:E/FOceuKAFUfpbIJDKWz/May6guE+eGibfGT6q+n1to= github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= -github.com/Azure/azure-storage-queue-go v0.0.0-20190416192124-a17745f1cdbf h1:ncld3f7t7BtHt1CrwbVQW9PjpwWyVA8sNJkbrgIfcHI= -github.com/Azure/azure-storage-queue-go v0.0.0-20190416192124-a17745f1cdbf/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= -github.com/Azure/go-autorest v11.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.7.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ= -github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd 
h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= +github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v13.3.2+incompatible h1:VxzPyuhtnlBOzc4IWCZHqpyH2d+QMLQEuy3wREyY4oc= +github.com/Azure/go-autorest v13.3.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503 h1:uUhdsDMg2GbFLF5GfQPtLMWd5vdDZSfqvqQp3waafxQ= +github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.10.0 h1:mvdtztBqcL8se7MdrUweNieTNi4kfNG6GOJuurQJpuY= +github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503 h1:Hxqlh1uAA8aGpa1dFhDNhll7U/rkWtG8ZItFvRMr7l0= +github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk= 
+github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503 h1:2McfZNaDqGPjv2pddK547PENIk4HV+NT7gvqRq4L0us= +github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= 
+github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503 h1:RBrGlrkPWapMcLp1M6ywCqyYKOAT5ERI6lYFvGKOThE= +github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg= -github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Huawei/gophercloud v0.0.0-20190806033045-3f2c8f6aa160 h1:2PTY/4OWLFl3/JmjJ0KWiPRNfi6DugNdphaomxy5Ro4= -github.com/Huawei/gophercloud v0.0.0-20190806033045-3f2c8f6aa160/go.mod h1:TUtAO2PE+Nj7/QdfUXbhi5Xu0uFKVccyukPA7UCxD9w= -github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Huawei/gophercloud v1.0.21 h1:HhtzZzRGZiVmLypqHlXrGAcdC1TJW99FLewfPSVktpY= +github.com/Huawei/gophercloud v1.0.21/go.mod 
h1:TUtAO2PE+Nj7/QdfUXbhi5Xu0uFKVccyukPA7UCxD9w= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v0.0.0-20190301161902-9f8fceff796f/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/NYTimes/gziphandler v1.0.1 h1:iLrQrdwjDd52kHDA5op2UBJFjmOb9g+7scBan4RN8F0= -github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= +github.com/Masterminds/squirrel v1.2.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA= +github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5/go.mod 
h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs= -github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/sarama v1.26.4 h1:+17TxUq/PJEAfZAll0T7XJjSgQWCpaQSoki/x5yN8o8= +github.com/Shopify/sarama v1.26.4/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= -github.com/a8m/mark v0.1.1-0.20170507133748-44f2db618845/go.mod 
h1:c8Mh99Cw82nrsAnPgxQSZHkswVOJF7/MqZb1ZdvriLM= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2/go.mod h1:X0noFIik9YqfhGYBLEHg8LJKEwy7QIitLQuFMpKLcPk= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.25.6 h1:Rmg2pgKXoCfNe0KQb4LNSNmHqMdcgBjpMeXK9IjHWq8= -github.com/aws/aws-sdk-go v1.25.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= +github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.3 h1:E3OciOGVlJrv1gQ2T7/Oou+I9nGPB2j978THQjvZBf0= +github.com/aws/aws-sdk-go v1.32.3/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+w3UOJD/DkEzllvugfdx7bGcMOFhvr/4= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/brancz/gojsontoyaml v0.0.0-20191212081931-bf2969bbd742/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= +github.com/brancz/kube-rbac-proxy v0.5.0/go.mod h1:cL2VjiIFGS90Cjh5ZZ8+It6tMcBt8rwvuw2J6Mamnl0= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap 
v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= -github.com/cenk/backoff v2.0.0+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/apd 
v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68mZX1lGBkTWyp3CLcenw9I94W2dLeRvMzcn9N4= -github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f/go.mod h1:xeT/CQ0qZHangbYbWShlCGAx31aV4AjGswDUjhKS6HQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= -github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.9+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity 
v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.12+incompatible h1:pAWNwdf7QiT1zfaWyqCtNZQWCLByQyA3JrSQyuYAqnQ= -github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd 
v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk= -github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/prometheus-operator v0.31.1 h1:+pS6+Ha01vuoGBh5glhfngk2sB1ELwe6tbXhMkMmp/U= -github.com/coreos/prometheus-operator v0.31.1/go.mod h1:vHwtP2e+VmEeS6m6lgp87aH+npGVRQsCi5jhcuQA1sA= +github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc h1:nMbUjGuF7UzVluucix/vsy4973BNdEiT/aX6kFtskKM= +github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc/go.mod h1:erio69w1R/aC14D5nfvAXSlE8FT8jt2Hnavc50Dp33A= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cyphar/filepath-securejoin 
v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= +github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= +github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= +github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= +github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A= +github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE= +github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= +github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= 
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dimchansky/utfbom v1.0.0 h1:fGC2kkf4qOoKqZ4q7iIh+Vef4ubC1c38UDsEyZynZPc= +github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/docker/distribution v2.6.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docker/distribution v2.7.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= 
+github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elastic/gosigar v0.9.0/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= -github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= -github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= -github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful v2.6.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.8.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.8.1+incompatible h1:AyDqLHbJ1quqbWr/OWDw+PlIP8ZFoTmYrGYaxzrLbNg= -github.com/emicklei/go-restful v2.8.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6 h1:V94anc0ZG3Pa/cAMwP2m1aQW3+/FF8Qmw/GsFyTJAp4= -github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ= -github.com/evanphx/json-patch v3.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod 
h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= +github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful-swagger12 v0.0.0-20170208215640-dcef7f557305 h1:2vAWk0wMCWb/pYiyat2rRZp5I5ZM+efPlagySNZ3JeM= +github.com/emicklei/go-restful-swagger12 v0.0.0-20170208215640-dcef7f557305/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= 
-github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fatih/structtag v1.0.0/go.mod h1:IKitwq45uXL/yqi5mYghiD3w9H6eTOvI9vnk8tXMphA= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gernest/wow v0.1.0/go.mod h1:dEPabJRi5BneI1Nev1VWo0ZlcTWibHWp43qxKms4elY= -github.com/getsentry/raven-go v0.1.2/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -197,134 +323,195 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE= github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis 
v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod 
h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.7 
h1:0xWSeMd35y5avQAThZR2PkEuqSosoS5t6gDH4L8n11M= +github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-redis/redis v6.15.5+incompatible h1:pLky8I0rgiblWfa8C1EV7fPEUv0aH6vKRaYHc/YRHVk= -github.com/go-redis/redis v6.15.5+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod 
h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-redis/redis v6.15.8+incompatible h1:BKZuG6mCnRj5AOaWJXoCgf6rqTYnYJLe4en2hxT7r9o= +github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= -github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= +github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= 
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache 
v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod 
h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod 
h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20180605153948-8b03ce837f34/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod 
h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.2.0 h1:lD2Bce2xBAMNNcFZ0dObTpXkGLlVIb33RPVUNVpw6ic= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.6.0 h1:Xb2lcqZtml1XjgYZxbeayEemq7ASbeTp09m36gQFpEU= +github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs 
v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190203031600-7a902570cb17/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20181025070259-68e3a13e4117/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.6.3/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.2 h1:S+ef0492XaIknb8LMjcwgW2i3cNTzDYMmDrOThOJNWc= -github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-health-probe v0.2.0/go.mod h1:4GVx/bTCtZaSzhjbGueDY5YgBdsmKeVx+LErv/n0L6s= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-health-probe v0.2.1-0.20181220223928-2bf0a5b182db/go.mod h1:uBKkC2RbarFsvS5jMJHpVhTLvGlGQj9JJwkaePE3FWI= 
-github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -333,11 +520,14 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0 
h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= @@ -350,18 +540,23 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 
h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/hashicorp/vault/api v1.0.4 h1:j08Or/wryXT4AcHj1oCbMd7IijXcKzYUGw59LGu9onU= github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= github.com/hashicorp/vault/sdk v0.1.13 h1:mOEPeOhT7jl0J4AMl1E705+BcmeRs1VmKNb9F0sMLy8= @@ -370,44 +565,59 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/improbable-eng/thanos v0.5.0/go.mod h1:RXlsWB7YlTbhIod//QDyd5cBZsnEN0jROXZJY/ol4nk= +github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb v0.0.0-20170331210902-15e594fc09f1/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go 
v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/jsonnet-bundler/jsonnet-bundler v0.1.0/go.mod h1:YKsSFc9VFhhLITkJS3X2PrRqWG9u2Jq99udTdDjQLfM= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jsonnet-bundler/jsonnet-bundler v0.3.1/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knz/strtime 
v0.0.0-20181018220328-af2256ee352c/go.mod h1:4ZxfWkxwtc7dBeifERVVWRy9F9rTU9p0yCDgeCtlius= +github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= +github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= @@ -415,49 +625,66 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20190703094830-abe433176c52 h1:NsvzW0q/jJETdDPjBi38PI6T26IsRYtXxqBbAQOb9ic= -github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20190703094830-abe433176c52/go.mod h1:KWRxWvzVCNvDtG9ejU5UdpgvxdCZFMUZu0xroKWG8Bo= +github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= +github.com/kylelemons/godebug 
v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= +github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lightstep/lightstep-tracer-go v0.15.6/go.mod h1:6AMpwZpsyCFwSovxzM78e+AsYxE8sGwiM6C3TytaWeI= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
-github.com/mailru/easyjson v0.0.0-20190403194419-1ea4449da983/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/martinlindhe/base36 v0.0.0-20180729042928-5cda0030da17/go.mod h1:+AtEs8xrBpCeYgSLoY/aJ6Wf37jtBuR0s35750M27+8= -github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs= +github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod 
h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/maxbrunsfeld/counterfeiter v0.0.0-20181017030959-1aadac120687/go.mod h1:aoVsckWnsNzazwF2kmD+bzgdr4GBlbK91zsdivQJ2eU= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.10/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/minio/cli v1.20.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= -github.com/minio/minio-go/v6 
v6.0.27-0.20190529152532-de69c0e465ed/go.mod h1:vaNT59cWULS37E+E9zkuN/BVnKHyXtVGS+b04Boc66Y= +github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= +github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= +github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -467,259 +694,384 @@ github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go. 
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 h1:hOY53G+kBFhbYFpRVxHl5eS7laP6B1+Cq+Z9Dry1iMU= github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/mozillazg/go-cos v0.12.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= 
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.2-0.20180831124310-ae19f1b56d53/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= -github.com/opentracing-contrib/go-stdlib v0.0.0-20170113013457-1de4cc2120e7/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod 
h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/operator-framework/go-appr v0.0.0-20180917210448-f2aef88446f2/go.mod h1:YNzwUx1i6C4dXWcffyq3yaIb0rh/K8/OvQ4vG0SNlSw= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20181023032605-e838f7fb2186/go.mod h1:Ma5ZXd4S1vmMyewWlF7aO8CZiokR7Sd8dhSfkGkNU4U= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190105193533-81104ffdc4fb/go.mod h1:XMyE4n2opUK4N6L45YGQkXXi8F9fD7XDYFv/CsS6V5I= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190125151539-1e295784b30a/go.mod h1:vq6TTFvg6ti1Bn6ACsZneZTmjTsURgDD6tQtVDbEgsU= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190128024246-5eb7ae5bdb7a/go.mod h1:vq6TTFvg6ti1Bn6ACsZneZTmjTsURgDD6tQtVDbEgsU= -github.com/operator-framework/operator-marketplace v0.0.0-20190216021216-57300a3ef3ba/go.mod h1:msZSL8pXwzQjB+hU+awVrZQw94IwJi3sNZVD3NoESIs= -github.com/operator-framework/operator-registry v1.0.1/go.mod h1:1xEdZjjUg2hPEd52LG3YQ0jtwiwEGdm98S1TH5P4RAA= -github.com/operator-framework/operator-registry v1.0.4/go.mod h1:hve6YwcjM2nGVlscLtNsp9sIIBkNZo6jlJgzWw7vP9s= -github.com/operator-framework/operator-registry v1.1.1/go.mod h1:7D4WEwL+EKti5npUh4/u64DQhawCBRugp8Ql20duUb4= 
-github.com/operator-framework/operator-sdk v0.11.0 h1:tQumPT2UjD6uhggfAerRbPt+rWOPKC80DmgKUEqeGYo= -github.com/operator-framework/operator-sdk v0.11.0/go.mod h1:Oo+O2br5qR6XSLWY/GgIvTvpsEKtzeWp+I3rHF0WIq8= +github.com/operator-framework/api v0.3.7-0.20200602203552-431198de9fc2/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= +github.com/operator-framework/operator-registry v1.12.6-0.20200605115407-01fa069730e2/go.mod h1:loVINznYhgBIkmv83kU4yee88RS0BBk+hqOw9r4bhJk= +github.com/operator-framework/operator-sdk v0.18.1 h1:AOUOBchkx8r3yYF/MDjH0gdtd13ACENuy2gnsSXQXt0= +github.com/operator-framework/operator-sdk v0.18.1/go.mod h1:QMFHXj8+SxF56tfR1QmIU/tc9FKI73TG8Qw7Iy4D2zY= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4= -github.com/peterbourgon/diskv v2.0.1+incompatible 
h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea/go.mod h1:1VcHEd3ro4QMoHfiNl/j7Jkln9+KQuorp0PItHMJYNg= -github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod 
h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= +github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20190104105734-b1c43a6df3ae/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs 
v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190104112138-b1a0a9a36d74/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/prometheus v1.8.2-0.20190525122359-d20e84d0fb64/go.mod h1:oYrT4Vs22/NcnoVYXt5m4cIHP+znvgyusahVpyETKTw= -github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod 
h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= +github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rlmcpherson/s3gof3r v0.5.0/go.mod h1:s7vv7SMDPInkitQMuZzH615G7yWHdrU2r/Go7Bo71Rs= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1 h1:NZInwlJPD/G44mJDgBEMFvBfbv/QQKCrpo+az/QXn8c= github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal 
v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rubenv/sql-migrate v0.0.0-20190618074426-f4d34eae5a5c/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= -github.com/rubyist/circuitbreaker v2.2.1+incompatible/go.mod h1:Ycs3JgJADPuzJDwffe12k6BZT8hxVi6lFK+gWYJLN4A= +github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rubenv/sql-migrate v0.0.0-20200212082348-64f95ea68aa3/go.mod h1:rtQlpHw+eR6UrqaS3kX1VYeaCxzCVdimDS7g5Ln4pPc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sasha-s/go-deadlock v0.0.0-20161201235124-341000892f3d/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= +github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/satori/go.uuid 
v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/spec v1.0.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.5.0 
h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= +github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.0.0/go.mod 
h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stevvooe/resumable v0.0.0-20180830230917-22b14a53ba50/go.mod h1:1pdIZTAHUz+HDKDVZ++5xg/duPlhKAIzw9qy42CWYp4= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 h1:2MR0pKUzlP3SGgj5NYJe/zRYDwOu9ku6YHy+Iw7l5DM= +github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= 
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/technosophos/moniker v0.0.0-20180509230615-a5dbd03a2245/go.mod h1:O1c8HleITsZqzNZDjSNzirUGsMT0oGu9LhHKoJrqO+A= -github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= +github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec 
v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc h1:yUaosFVTJwnltaHbSNC3i82I92quFs+OFPRl8kNMVwo= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/zroubalik/custom-metrics-apiserver v0.0.0-20200504115811-b4bb20049e83 h1:X9B1bcERXC9NO/n66LFVFUPAty+o6MOPa6oLORrmcz0= +github.com/zroubalik/custom-metrics-apiserver v0.0.0-20200504115811-b4bb20049e83/go.mod h1:PNkabgNiouaJupBK9usDKpVqzjBKh3aZYRZ8fgGMzLQ= +gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod 
h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= +go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= +go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= +go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= -go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= 
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.14.1 
h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4 h1:ydJNl0ENAG67pFbB+9tfhiL2pYqLhfoaZFw/cjLhY4A= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -727,246 +1079,318 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181105165119-ca4130e427c7/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181023152157-44b849a8bc13/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.1/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011152555-a398e557df60/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181207222222-4c874b978acb/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190918214516-5a1a30219888/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b h1:AFZdJUT7jJYXQEC29hYH/WZkoV7+KhwxQGmdZ19yYoY= +golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= -google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod 
h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= +gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ= google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181016170114-94acd270e44e/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20191002211648-c459b9ce5143 h1:tikhlQEJeezbnu0Zcblj7g5vm/L7xt6g1vnfq8mRCS4= 
google.golang.org/genproto v0.0.0-20191002211648-c459b9ce5143/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.3.1/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 
h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -honnef.co/go/tools 
v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +helm.sh/helm/v3 v3.2.0/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= 
-k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apiserver v0.0.0-20190409021813-1ec86e4da56c h1:k7ALUVzrOEgz4hOF+pr4pePn7TqZ9lB/8Z8ndMSsWSU= -k8s.io/apiserver v0.0.0-20190409021813-1ec86e4da56c/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= +k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= +k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= +k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= +k8s.io/api v0.0.0-20191122220107-b5267f2975e0/go.mod h1:vYpRfxYkMrmPPSesoHEkGNHxNKTk96REAwqm/inQbs0= +k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= +k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= +k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= +k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= +k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8= +k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= +k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= 
+k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= +k8s.io/apimachinery v0.0.0-20191121175448-79c2a76c473a/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic= +k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= -k8s.io/cli-runtime v0.0.0-20181213153952-835b10687cb6/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM= -k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= -k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/cloud-provider v0.0.0-20190409023720-1bc0c81fa51d/go.mod h1:LlIffnLBu+GG7d4ppPzC8UnA1Ex8S+ntmSRVsnr7Xy4= -k8s.io/code-generator v0.0.0-20181203235156-f8cba74510f3/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= -k8s.io/code-generator v0.0.0-20190311093542-50b561225d70/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= -k8s.io/component-base v0.0.0-20191014071552-ca590c444ad5 h1:hG740zNJzE+OQJK5qKt8lbK7Yw3gH/OYddRDxDAXG+I= -k8s.io/component-base v0.0.0-20191014071552-ca590c444ad5/go.mod h1:K7cm+qApREO5hh5gsrxxeiL1BbWYNzMXw731icihE2Q= -k8s.io/gengo v0.0.0-20181106084056-51747d6e00da/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20181113154421-fd15ee9cc2f7/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a h1:QoHVuRquf80YZ+/bovwxoMO3Q/A3nt3yTgS0/0nejuk= -k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod 
h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/helm v2.14.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= +k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ= +k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ= +k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE= +k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= +k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= +k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso= +k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= +k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc= +k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= +k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y= +k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod 
h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-aggregator v0.0.0-20181204002017-122bac39d429/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-state-metrics v1.7.2 h1:6vdtgXrrRRMSgnyDmgua+qvgCYv954JNfxXAtDkeLVQ= k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+IjYA/E= -k8s.io/kubernetes v1.11.7-beta.0.0.20181219023948-b875d52ea96d/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/kubernetes v1.11.8-beta.0.0.20190124204751-3a10094374f2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/kubernetes 
v1.14.2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/metrics v0.0.0-20190409022812-850dadb8b49c h1:E1sgDRsVLX06ZYtF2t2CaSHKyfL7z4VePFENmNG+vvY= -k8s.io/metrics v0.0.0-20190409022812-850dadb8b49c/go.mod h1:a25VAbm3QT3xiVl1jtoF1ueAKQM149UdZ+L93ePfV3M= -k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c= +k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU= +k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4= +k8s.io/metrics v0.18.2 h1:v4J7WKu/Zo/htSH3w//UWJZT9/CpUThXWYyUbQ/F/jY= +k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg= k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191010214722-8d271d903fe4 h1:Gi+/O1saihwDqnlmC8Vhv1M5Sp4+rbOmK9TbsLn8ZEA= -k8s.io/utils v0.0.0-20191010214722-8d271d903fe4/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM= +k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +modernc.org/cc v1.0.0/go.mod 
h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= pack.ag/amqp v0.8.0/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= pack.ag/amqp v0.11.0/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= -pack.ag/amqp v0.11.2 h1:cuNDWLUTbKRtEZwhB0WQBXf9pGbm87pUBXQhvcFxBWg= -pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/controller-runtime v0.2.2 h1:JT/vJJhUjjL9NZNwnm8AXmqCBUXSCFKmTaNjwDi28N0= -sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= -sigs.k8s.io/controller-tools v0.2.0/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y= +pack.ag/amqp v0.12.5 h1:WjH1KZ0hHZbT62nzDpvFCQD+jgSwRqj6FUOc2/GlqHM= +pack.ag/amqp v0.12.5/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= +rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7 h1:uuHDyjllyzRyCIvvn0OBjiRB0SgBZGqHNYAmjR7fO50= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvOk9NM= +sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= +sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= +sigs.k8s.io/controller-tools v0.3.0/go.mod 
h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= +sigs.k8s.io/kubebuilder v1.0.9-0.20200513134826-f07a0146a40b/go.mod h1:FGPx0hvP73+bapzWoy5ePuhAJYgJjrFbPxgvWyortM0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2 h1:9r5DY45ef9LtcA6BnkhW8MPV7OKAfbf2AUwUhq3LeRk= -sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= -sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 00000000000..22a8df315b1 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the 
Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/hack/tools.go b/hack/tools.go new file mode 100644 index 00000000000..7a21725cf10 --- /dev/null +++ b/hack/tools.go @@ -0,0 +1,6 @@ +// +build tools + +// This package imports things required by build scripts, to force `go mod` to see them as dependencies +package tools + +import _ "k8s.io/code-generator" diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh new file mode 100755 index 00000000000..82a531cdcac --- /dev/null +++ b/hack/update-codegen.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} + +# generate the code with: +# --output-base because this script should also be able to run inside the vendor dir of +# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir +# instead of the $GOPATH directly. For normal projects this can be dropped. 
+# bash "${CODEGEN_PKG}"/generate-groups.sh "deepcopy,client,informer,lister" \ +# Deepcopy is generated by operator-sdk +bash "${CODEGEN_PKG}"/generate-groups.sh "client,informer,lister" \ + github.com/kedacore/keda/pkg/generated github.com/kedacore/keda/pkg/apis \ + keda:v1alpha1 \ + --go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt + +# To use your own boilerplate text append: +# --go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt diff --git a/hack/verify-codegen.sh b/hack/verify-codegen.sh new file mode 100755 index 00000000000..b347be7ae7f --- /dev/null +++ b/hack/verify-codegen.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. + +DIFFROOT="${SCRIPT_ROOT}/pkg" +TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" +_tmp="${SCRIPT_ROOT}/_tmp" + +cleanup() { + rm -rf "${_tmp}" +} +trap "cleanup" EXIT SIGINT + +cleanup + +mkdir -p "${TMP_DIFFROOT}" +cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}" + +"${SCRIPT_ROOT}/hack/update-codegen.sh" +echo "diffing ${DIFFROOT} against freshly generated codegen" +ret=0 +diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$? +cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}" +if [[ $ret -eq 0 ]] +then + echo "${DIFFROOT} up to date." +else + echo "${DIFFROOT} is out of date. Please run make generate-clientset" + exit 1 +fi diff --git a/pkg/apis/duck/verify.go b/pkg/apis/duck/verify.go new file mode 100644 index 00000000000..8f49731862e --- /dev/null +++ b/pkg/apis/duck/verify.go @@ -0,0 +1,46 @@ +package duck + +import ( + "encoding/json" + + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// Implementable is implemented by the Fooable duck type that consumers +// are expected to embed as a `.status.fooable` field. +type Implementable interface { + // GetFullType returns an instance of a full resource wrapping + // an instance of this Implementable that can populate its fields + // to verify json roundtripping. 
+ GetFullType() Populatable +} + +// Populatable is implemented by a skeleton resource wrapping an Implementable +// duck type. It will generally have TypeMeta, ObjectMeta, and a Status field +// wrapping a Fooable field. +type Populatable interface { + Listable + + // Populate fills in all possible fields, so that we can verify that + // they roundtrip properly through JSON. + Populate() +} + +// Listable indicates that a particular type can be returned via the returned +// list type by the API server. +type Listable interface { + runtime.Object + + GetListType() runtime.Object +} + +// FromUnstructured takes unstructured object from (say from client-go/dynamic) and +// converts it into our duck types. +func FromUnstructured(obj json.Marshaler, target interface{}) error { + // Use the unstructured marshaller to ensure it's proper JSON + raw, err := obj.MarshalJSON() + if err != nil { + return err + } + return json.Unmarshal(raw, &target) +} diff --git a/pkg/apis/keda/v1alpha1/condition_types.go b/pkg/apis/keda/v1alpha1/condition_types.go new file mode 100644 index 00000000000..19cecc91bb5 --- /dev/null +++ b/pkg/apis/keda/v1alpha1/condition_types.go @@ -0,0 +1,133 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ConditionType string + +const ( + // ConditionReady specifies that the resource is ready. + // For long-running resources. + ConditionReady ConditionType = "Ready" + // ConditionActive specifies that the resource has finished. + // For resource which run to completion. + ConditionActive ConditionType = "Active" +) + +type Condition struct { + // Type of condition + // +required + Type ConditionType `json:"type" description:"type of status condition"` + + // Status of the condition, one of True, False, Unknown. + // +required + Status metav1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // The reason for the condition's last transition. 
+ // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + +type Conditions []Condition + +// AreInitialized performs check all Conditions are initialized +// return true if Conditions are initialized +// return false if Conditions are not initialized +func (c *Conditions) AreInitialized() bool { + foundReady := false + foundActive := false + if *c != nil { + for _, condition := range *c { + if condition.Type == ConditionReady { + foundReady = true + break + } + } + for _, condition := range *c { + if condition.Type == ConditionActive { + foundActive = true + break + } + } + } + + return foundReady && foundActive +} + +// GetInitializedConditions returns Conditions initialized to the default -> Status: Unknown +func GetInitializedConditions() *Conditions { + return &Conditions{{Type: ConditionReady, Status: metav1.ConditionUnknown}, {Type: ConditionActive, Status: metav1.ConditionUnknown}} +} + +// IsTrue is true if the condition is True +func (c *Condition) IsTrue() bool { + if c == nil { + return false + } + return c.Status == metav1.ConditionTrue +} + +// IsFalse is true if the condition is False +func (c *Condition) IsFalse() bool { + if c == nil { + return false + } + return c.Status == metav1.ConditionFalse +} + +// IsUnknown is true if the condition is Unknown +func (c *Condition) IsUnknown() bool { + if c == nil { + return true + } + return c.Status == metav1.ConditionUnknown +} + +// SetReadyCondition modifies Ready Condition according to input parameters +func (c *Conditions) SetReadyCondition(status metav1.ConditionStatus, reason string, message string) { + if *c == nil { + c = GetInitializedConditions() + } + c.setCondition(ConditionReady, status, reason, message) +} + 
+// SetActiveCondition modifies Active Condition according to input parameters +func (c *Conditions) SetActiveCondition(status metav1.ConditionStatus, reason string, message string) { + if *c == nil { + c = GetInitializedConditions() + } + c.setCondition(ConditionActive, status, reason, message) +} + +// GetActiveCondition returns Condition of type Active +func (c *Conditions) GetActiveCondition() Condition { + if *c == nil { + c = GetInitializedConditions() + } + return c.getCondition(ConditionActive) +} + +func (c Conditions) getCondition(conditionType ConditionType) Condition { + for i := range c { + if c[i].Type == conditionType { + return c[i] + } + } + return Condition{} +} + +func (c Conditions) setCondition(conditionType ConditionType, status metav1.ConditionStatus, reason string, message string) { + for i := range c { + if c[i].Type == conditionType { + c[i].Status = status + c[i].Reason = reason + c[i].Message = message + break + } + } +} diff --git a/pkg/apis/keda/v1alpha1/doc.go b/pkg/apis/keda/v1alpha1/doc.go index 284675fcdcc..bdcaea6eb0d 100644 --- a/pkg/apis/keda/v1alpha1/doc.go +++ b/pkg/apis/keda/v1alpha1/doc.go @@ -1,4 +1,4 @@ // Package v1alpha1 contains API Schema definitions for the keda v1alpha1 API group // +k8s:deepcopy-gen=package,register -// +groupName=keda.k8s.io +// +groupName=keda.sh package v1alpha1 diff --git a/pkg/apis/keda/v1alpha1/gvkr_types.go b/pkg/apis/keda/v1alpha1/gvkr_types.go new file mode 100644 index 00000000000..364d510a67f --- /dev/null +++ b/pkg/apis/keda/v1alpha1/gvkr_types.go @@ -0,0 +1,29 @@ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupVersionKindResource provides unified structure for schema.GroupVersionKind and Resource +type GroupVersionKindResource struct { + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + Resource string `json:"resource"` +} + +func (gvkr GroupVersionKindResource) GroupVersionKind() schema.GroupVersionKind { 
+ return schema.GroupVersionKind{Group: gvkr.Group, Version: gvkr.Version, Kind: gvkr.Kind} +} + +func (gvkr GroupVersionKindResource) GroupVersion() schema.GroupVersion { + return schema.GroupVersion{Group: gvkr.Group, Version: gvkr.Version} +} + +func (gvkr GroupVersionKindResource) GroupResource() schema.GroupResource { + return schema.GroupResource{Group: gvkr.Group, Resource: gvkr.Resource} +} + +func (gvkr GroupVersionKindResource) GVKString() string { + return gvkr.Group + "/" + gvkr.Version + "." + gvkr.Kind +} diff --git a/pkg/apis/keda/v1alpha1/podspec_types.go b/pkg/apis/keda/v1alpha1/podspec_types.go new file mode 100644 index 00000000000..2ceaf5c74bd --- /dev/null +++ b/pkg/apis/keda/v1alpha1/podspec_types.go @@ -0,0 +1,74 @@ +package v1alpha1 + +import ( + "github.com/kedacore/keda/pkg/apis/duck" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// PodSpecable is implemented by types containing a PodTemplateSpec +// in the manner of ReplicaSet, Deployment, DaemonSet, StatefulSet. +type PodSpecable corev1.PodTemplateSpec + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WithPod is the shell that demonstrates how PodSpecable types wrap +// a PodSpec. +type WithPod struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec WithPodSpec `json:"spec,omitempty"` +} + +// WithPodSpec is the shell around the PodSpecable within WithPod. +type WithPodSpec struct { + Template PodSpecable `json:"template,omitempty"` +} + +// Assert that we implement the interfaces necessary to +// use duck.VerifyType. 
+var ( + _ duck.Populatable = (*WithPod)(nil) + _ duck.Implementable = (*PodSpecable)(nil) + _ duck.Listable = (*WithPod)(nil) +) + +// GetFullType implements duck.Implementable +func (*PodSpecable) GetFullType() duck.Populatable { + return &WithPod{} +} + +// Populate implements duck.Populatable +func (t *WithPod) Populate() { + t.Spec.Template = PodSpecable{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "container-name", + Image: "container-image:latest", + }}, + }, + } +} + +// GetListType implements apis.Listable +func (*WithPod) GetListType() runtime.Object { + return &WithPodList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WithPodList is a list of WithPod resources +type WithPodList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []WithPod `json:"items"` +} diff --git a/pkg/apis/keda/v1alpha1/register.go b/pkg/apis/keda/v1alpha1/register.go index 6b310631b16..c24368f23c0 100644 --- a/pkg/apis/keda/v1alpha1/register.go +++ b/pkg/apis/keda/v1alpha1/register.go @@ -2,7 +2,7 @@ // Package v1alpha1 contains API Schema definitions for the keda v1alpha1 API group // +k8s:deepcopy-gen=package,register -// +groupName=keda.k8s.io +// +groupName=keda.sh package v1alpha1 import ( @@ -12,8 +12,24 @@ import ( var ( // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "keda.k8s.io", Version: "v1alpha1"} + SchemeGroupVersion = schema.GroupVersion{Group: "keda.sh", Version: "v1alpha1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // added for generated clientset + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme ) + +// added for generated 
clientset +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// added for generated clientset +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/pkg/apis/keda/v1alpha1/scaledjob_types.go b/pkg/apis/keda/v1alpha1/scaledjob_types.go new file mode 100644 index 00000000000..2c562314115 --- /dev/null +++ b/pkg/apis/keda/v1alpha1/scaledjob_types.go @@ -0,0 +1,68 @@ +package v1alpha1 + +import ( + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ScaledJob is the Schema for the scaledjobs API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=scaledjobs,scope=Namespaced,shortName=sj +// +kubebuilder:printcolumn:name="Triggers",type="string",JSONPath=".spec.triggers[*].type" +// +kubebuilder:printcolumn:name="Authentication",type="string",JSONPath=".spec.triggers[*].authenticationRef.name" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status" +// +kubebuilder:printcolumn:name="Active",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +type ScaledJob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ScaledJobSpec `json:"spec,omitempty"` + Status ScaledJobStatus `json:"status,omitempty"` +} + +// ScaledJobSpec defines the desired state of ScaledJob +// +k8s:openapi-gen=true +type ScaledJobSpec struct { + + // TODO define the spec + + JobTargetRef *batchv1.JobSpec `json:"jobTargetRef"` + 
+ // +optional + PollingInterval *int32 `json:"pollingInterval,omitempty"` + // +optional + CooldownPeriod *int32 `json:"cooldownPeriod,omitempty"` + // +optional + MinReplicaCount *int32 `json:"minReplicaCount,omitempty"` + // +optional + MaxReplicaCount *int32 `json:"maxReplicaCount,omitempty"` + Triggers []ScaleTriggers `json:"triggers"` +} + +// ScaledJobStatus defines the observed state of ScaledJob +// +k8s:openapi-gen=true +// +optional +type ScaledJobStatus struct { + // +optional + LastActiveTime *metav1.Time `json:"lastActiveTime,omitempty"` + // +optional + Conditions Conditions `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ScaledJobList contains a list of ScaledJob +type ScaledJobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScaledJob `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ScaledJob{}, &ScaledJobList{}) +} diff --git a/pkg/apis/keda/v1alpha1/scaledobject_types.go b/pkg/apis/keda/v1alpha1/scaledobject_types.go index 774387f4605..819c48479ad 100644 --- a/pkg/apis/keda/v1alpha1/scaledobject_types.go +++ b/pkg/apis/keda/v1alpha1/scaledobject_types.go @@ -1,28 +1,23 @@ package v1alpha1 import ( - batchv1 "k8s.io/api/batch/v1" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ScaledObjectScaleType distinguish between Deployment based and K8s Jobs -type ScaledObjectScaleType string - -const ( - // ScaleTypeDeployment specifies Deployment based ScaleObject - ScaleTypeDeployment ScaledObjectScaleType = "deployment" - // ScaleTypeJob specifies K8s Jobs based ScaleObject - ScaleTypeJob ScaledObjectScaleType = "job" -) - +// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ScaledObject is a specification for a ScaledObject resource // +k8s:openapi-gen=true // +kubebuilder:subresource:status -// 
+kubebuilder:resource:path=scaledobjects,scope=Namespaced -// +kubebuilder:printcolumn:name="Deployment",type="string",JSONPath=".spec.scaleTargetRef.deploymentName" +// +kubebuilder:resource:path=scaledobjects,scope=Namespaced,shortName=so +// +kubebuilder:printcolumn:name="ScaleTargetKind",type="string",JSONPath=".status.scaleTargetKind" +// +kubebuilder:printcolumn:name="ScaleTargetName",type="string",JSONPath=".spec.scaleTargetRef.name" // +kubebuilder:printcolumn:name="Triggers",type="string",JSONPath=".spec.triggers[*].type" +// +kubebuilder:printcolumn:name="Authentication",type="string",JSONPath=".spec.triggers[*].authenticationRef.name" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status" +// +kubebuilder:printcolumn:name="Active",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].status" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" type ScaledObject struct { metav1.TypeMeta `json:",inline"` @@ -36,12 +31,7 @@ type ScaledObject struct { // ScaledObjectSpec is the spec for a ScaledObject resource // +k8s:openapi-gen=true type ScaledObjectSpec struct { - // +optional - ScaleType ScaledObjectScaleType `json:"scaleType,omitempty"` - // +optional - ScaleTargetRef *ObjectReference `json:"scaleTargetRef,omitempty"` - // +optional - JobTargetRef *batchv1.JobSpec `json:"jobTargetRef,omitempty"` + ScaleTargetRef *ScaleTarget `json:"scaleTargetRef"` // +optional PollingInterval *int32 `json:"pollingInterval,omitempty"` // +optional @@ -50,15 +40,31 @@ type ScaledObjectSpec struct { MinReplicaCount *int32 `json:"minReplicaCount,omitempty"` // +optional MaxReplicaCount *int32 `json:"maxReplicaCount,omitempty"` - // +listType + // +optional + Advanced *AdvancedConfig `json:"advanced,omitempty"` + Triggers []ScaleTriggers `json:"triggers"` } -// ObjectReference holds the a reference to the deployment this -// ScaledObject applies +type AdvancedConfig 
struct { + HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig `json:"horizontalPodAutoscalerConfig,omitempty"` +} + +type HorizontalPodAutoscalerConfig struct { + ResourceMetrics []*autoscalingv2beta2.ResourceMetricSource `json:"resourceMetrics,omitempty"` + // +optional + Behavior *autoscalingv2beta2.HorizontalPodAutoscalerBehavior `json:"behavior,omitempty"` +} + +//ScaleTarget holds the a reference to the scale target Object // +k8s:openapi-gen=true -type ObjectReference struct { - DeploymentName string `json:"deploymentName"` +type ScaleTarget struct { + Name string `json:"name"` + // +optional + ApiVersion string `json:"apiVersion,omitempty"` + // +optional + Kind string `json:"kind,omitempty"` + // +optional ContainerName string `json:"containerName,omitempty"` } @@ -78,11 +84,16 @@ type ScaleTriggers struct { // +k8s:openapi-gen=true // +optional type ScaledObjectStatus struct { + // +optional + ScaleTargetKind string `json:"scaleTargetKind,omitempty"` + // +optional + ScaleTargetGVKR *GroupVersionKindResource `json:"scaleTargetGVKR,omitempty"` // +optional LastActiveTime *metav1.Time `json:"lastActiveTime,omitempty"` // +optional - // +listType ExternalMetricNames []string `json:"externalMetricNames,omitempty"` + // +optional + Conditions Conditions `json:"conditions,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/keda/v1alpha1/triggerauthentication_types.go b/pkg/apis/keda/v1alpha1/triggerauthentication_types.go index 5c2f0baa159..61b52575ae8 100644 --- a/pkg/apis/keda/v1alpha1/triggerauthentication_types.go +++ b/pkg/apis/keda/v1alpha1/triggerauthentication_types.go @@ -4,6 +4,22 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TriggerAuthentication defines how a trigger can authenticate +// +genclient +// +k8s:openapi-gen=true +// 
+kubebuilder:resource:path=triggerauthentications,scope=Namespaced,shortName=ta;triggerauth +// +kubebuilder:printcolumn:name="PodIdentity",type="string",JSONPath=".spec.podIdentity.provider" +// +kubebuilder:printcolumn:name="Secret",type="string",JSONPath=".spec.secretTargetRef[*].name" +// +kubebuilder:printcolumn:name="Env",type="string",JSONPath=".spec.env[*].name" +type TriggerAuthentication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TriggerAuthenticationSpec `json:"spec"` +} + // TriggerAuthenticationSpec defines the various ways to authenticate // +k8s:openapi-gen=true type TriggerAuthenticationSpec struct { @@ -11,11 +27,9 @@ type TriggerAuthenticationSpec struct { PodIdentity AuthPodIdentity `json:"podIdentity"` // +optional - // +listType SecretTargetRef []AuthSecretTargetRef `json:"secretTargetRef"` // +optional - // +listType Env []AuthEnvironment `json:"env"` // +optional @@ -24,18 +38,6 @@ type TriggerAuthenticationSpec struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// TriggerAuthentication defines how a trigger can authenticate -// +k8s:openapi-gen=true -// +kubebuilder:resource:path=triggerauthentications,scope=Namespaced -type TriggerAuthentication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec TriggerAuthenticationSpec `json:"spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // TriggerAuthenticationList contains a list of TriggerAuthentication type TriggerAuthenticationList struct { metav1.TypeMeta `json:",inline"` @@ -76,7 +78,7 @@ type AuthSecretTargetRef struct { } // AuthEnvironment is used to authenticate using environment variables -// in the destination deployment spec +// in the destination ScaleTarget spec // +k8s:openapi-gen=true type AuthEnvironment struct { Parameter string `json:"parameter"` @@ -91,9 +93,7 @@ type AuthEnvironment struct { type 
HashiCorpVault struct { Address string `json:"address"` Authentication VaultAuthentication `json:"authentication"` - - // +listType - Secrets []VaultSecret `json:"secrets"` + Secrets []VaultSecret `json:"secrets"` // +optional Credential Credential `json:"credential"` diff --git a/pkg/apis/keda/v1alpha1/withtriggers_types.go b/pkg/apis/keda/v1alpha1/withtriggers_types.go new file mode 100644 index 00000000000..a921403d72f --- /dev/null +++ b/pkg/apis/keda/v1alpha1/withtriggers_types.go @@ -0,0 +1,57 @@ +package v1alpha1 + +import ( + "github.com/kedacore/keda/pkg/apis/duck" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WithTriggers is a specification for a resource with triggers +// +k8s:openapi-gen=false +type WithTriggers struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec WithTriggersSpec `json:"spec"` +} + +// WithTriggersSpec is the spec for a an object with triggers resource +type WithTriggersSpec struct { + PollingInterval *int32 `json:"pollingInterval,omitempty"` + Triggers []ScaleTriggers `json:"triggers"` +} + +// Assert that we implement the interfaces necessary to +// use duck.VerifyType. 
+var ( + _ duck.Populatable = (*WithTriggers)(nil) + _ duck.Implementable = (*ScaleTriggers)(nil) + _ duck.Listable = (*WithTriggers)(nil) +) + +// GetFullType implements duck.Implementable +func (*ScaleTriggers) GetFullType() duck.Populatable { + return &WithTriggers{} +} + +// Populate implements duck.Populatable +func (t *WithTriggers) Populate() { + t.Spec.Triggers = []ScaleTriggers{{}} +} + +// GetListType implements apis.Listable +func (*WithTriggers) GetListType() runtime.Object { + return &WithTriggersList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WithTriggersList is a list of ScaledObject resources +type WithTriggersList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []WithTriggers `json:"items"` +} diff --git a/pkg/apis/keda/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/keda/v1alpha1/zz_generated.deepcopy.go index 6875d2cd9be..74af57045c2 100644 --- a/pkg/apis/keda/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/keda/v1alpha1/zz_generated.deepcopy.go @@ -1,14 +1,51 @@ // +build !ignore_autogenerated +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // Code generated by operator-sdk. DO NOT EDIT. package v1alpha1 import ( + v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/api/batch/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedConfig) DeepCopyInto(out *AdvancedConfig) { + *out = *in + if in.HorizontalPodAutoscalerConfig != nil { + in, out := &in.HorizontalPodAutoscalerConfig, &out.HorizontalPodAutoscalerConfig + *out = new(HorizontalPodAutoscalerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedConfig. +func (in *AdvancedConfig) DeepCopy() *AdvancedConfig { + if in == nil { + return nil + } + out := new(AdvancedConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthEnvironment) DeepCopyInto(out *AuthEnvironment) { *out = *in @@ -57,6 +94,42 @@ func (in *AuthSecretTargetRef) DeepCopy() *AuthSecretTargetRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Credential) DeepCopyInto(out *Credential) { *out = *in @@ -73,6 +146,22 @@ func (in *Credential) DeepCopy() *Credential { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionKindResource) DeepCopyInto(out *GroupVersionKindResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionKindResource. +func (in *GroupVersionKindResource) DeepCopy() *GroupVersionKindResource { + if in == nil { + return nil + } + out := new(GroupVersionKindResource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HashiCorpVault) DeepCopyInto(out *HashiCorpVault) { *out = *in @@ -96,17 +185,67 @@ func (in *HashiCorpVault) DeepCopy() *HashiCorpVault { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { +func (in *HorizontalPodAutoscalerConfig) DeepCopyInto(out *HorizontalPodAutoscalerConfig) { + *out = *in + if in.ResourceMetrics != nil { + in, out := &in.ResourceMetrics, &out.ResourceMetrics + *out = make([]*v2beta2.ResourceMetricSource, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(v2beta2.ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + } + } + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(v2beta2.HorizontalPodAutoscalerBehavior) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerConfig. 
+func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConfig { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSpecable) DeepCopyInto(out *PodSpecable) { *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. -func (in *ObjectReference) DeepCopy() *ObjectReference { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpecable. +func (in *PodSpecable) DeepCopy() *PodSpecable { if in == nil { return nil } - out := new(ObjectReference) + out := new(PodSpecable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleTarget) DeepCopyInto(out *ScaleTarget) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleTarget. +func (in *ScaleTarget) DeepCopy() *ScaleTarget { + if in == nil { + return nil + } + out := new(ScaleTarget) in.DeepCopyInto(out) return out } @@ -139,6 +278,140 @@ func (in *ScaleTriggers) DeepCopy() *ScaleTriggers { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaledJob) DeepCopyInto(out *ScaledJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledJob. 
+func (in *ScaledJob) DeepCopy() *ScaledJob { + if in == nil { + return nil + } + out := new(ScaledJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScaledJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaledJobList) DeepCopyInto(out *ScaledJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScaledJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledJobList. +func (in *ScaledJobList) DeepCopy() *ScaledJobList { + if in == nil { + return nil + } + out := new(ScaledJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScaledJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScaledJobSpec) DeepCopyInto(out *ScaledJobSpec) { + *out = *in + if in.JobTargetRef != nil { + in, out := &in.JobTargetRef, &out.JobTargetRef + *out = new(v1.JobSpec) + (*in).DeepCopyInto(*out) + } + if in.PollingInterval != nil { + in, out := &in.PollingInterval, &out.PollingInterval + *out = new(int32) + **out = **in + } + if in.CooldownPeriod != nil { + in, out := &in.CooldownPeriod, &out.CooldownPeriod + *out = new(int32) + **out = **in + } + if in.MinReplicaCount != nil { + in, out := &in.MinReplicaCount, &out.MinReplicaCount + *out = new(int32) + **out = **in + } + if in.MaxReplicaCount != nil { + in, out := &in.MaxReplicaCount, &out.MaxReplicaCount + *out = new(int32) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]ScaleTriggers, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledJobSpec. +func (in *ScaledJobSpec) DeepCopy() *ScaledJobSpec { + if in == nil { + return nil + } + out := new(ScaledJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaledJobStatus) DeepCopyInto(out *ScaledJobStatus) { + *out = *in + if in.LastActiveTime != nil { + in, out := &in.LastActiveTime, &out.LastActiveTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledJobStatus. +func (in *ScaledJobStatus) DeepCopy() *ScaledJobStatus { + if in == nil { + return nil + } + out := new(ScaledJobStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *ScaledObject) DeepCopyInto(out *ScaledObject) { *out = *in @@ -187,7 +460,7 @@ func (in *ScaledObjectAuthRef) DeepCopy() *ScaledObjectAuthRef { func (in *ScaledObjectList) DeepCopyInto(out *ScaledObjectList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ScaledObject, len(*in)) @@ -221,14 +494,9 @@ func (in *ScaledObjectSpec) DeepCopyInto(out *ScaledObjectSpec) { *out = *in if in.ScaleTargetRef != nil { in, out := &in.ScaleTargetRef, &out.ScaleTargetRef - *out = new(ObjectReference) + *out = new(ScaleTarget) **out = **in } - if in.JobTargetRef != nil { - in, out := &in.JobTargetRef, &out.JobTargetRef - *out = new(v1.JobSpec) - (*in).DeepCopyInto(*out) - } if in.PollingInterval != nil { in, out := &in.PollingInterval, &out.PollingInterval *out = new(int32) @@ -249,6 +517,11 @@ func (in *ScaledObjectSpec) DeepCopyInto(out *ScaledObjectSpec) { *out = new(int32) **out = **in } + if in.Advanced != nil { + in, out := &in.Advanced, &out.Advanced + *out = new(AdvancedConfig) + (*in).DeepCopyInto(*out) + } if in.Triggers != nil { in, out := &in.Triggers, &out.Triggers *out = make([]ScaleTriggers, len(*in)) @@ -272,6 +545,11 @@ func (in *ScaledObjectSpec) DeepCopy() *ScaledObjectSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ScaledObjectStatus) DeepCopyInto(out *ScaledObjectStatus) { *out = *in + if in.ScaleTargetGVKR != nil { + in, out := &in.ScaleTargetGVKR, &out.ScaleTargetGVKR + *out = new(GroupVersionKindResource) + **out = **in + } if in.LastActiveTime != nil { in, out := &in.LastActiveTime, &out.LastActiveTime *out = (*in).DeepCopy() @@ -281,6 +559,11 @@ func (in *ScaledObjectStatus) DeepCopyInto(out *ScaledObjectStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + copy(*out, *in) + } return } @@ -325,7 +608,7 @@ func (in *TriggerAuthentication) DeepCopyObject() runtime.Object { func (in *TriggerAuthenticationList) DeepCopyInto(out *TriggerAuthenticationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TriggerAuthentication, len(*in)) @@ -397,3 +680,168 @@ func (in *VaultSecret) DeepCopy() *VaultSecret { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WithPod) DeepCopyInto(out *WithPod) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithPod. +func (in *WithPod) DeepCopy() *WithPod { + if in == nil { + return nil + } + out := new(WithPod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WithPod) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WithPodList) DeepCopyInto(out *WithPodList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WithPod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithPodList. +func (in *WithPodList) DeepCopy() *WithPodList { + if in == nil { + return nil + } + out := new(WithPodList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WithPodList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WithPodSpec) DeepCopyInto(out *WithPodSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithPodSpec. +func (in *WithPodSpec) DeepCopy() *WithPodSpec { + if in == nil { + return nil + } + out := new(WithPodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WithTriggers) DeepCopyInto(out *WithTriggers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithTriggers. +func (in *WithTriggers) DeepCopy() *WithTriggers { + if in == nil { + return nil + } + out := new(WithTriggers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *WithTriggers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WithTriggersList) DeepCopyInto(out *WithTriggersList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WithTriggers, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithTriggersList. +func (in *WithTriggersList) DeepCopy() *WithTriggersList { + if in == nil { + return nil + } + out := new(WithTriggersList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WithTriggersList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WithTriggersSpec) DeepCopyInto(out *WithTriggersSpec) { + *out = *in + if in.PollingInterval != nil { + in, out := &in.PollingInterval, &out.PollingInterval + *out = new(int32) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]ScaleTriggers, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithTriggersSpec. 
+func (in *WithTriggersSpec) DeepCopy() *WithTriggersSpec { + if in == nil { + return nil + } + out := new(WithTriggersSpec) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/controller/add_scaledjob.go b/pkg/controller/add_scaledjob.go new file mode 100644 index 00000000000..c277e3cccde --- /dev/null +++ b/pkg/controller/add_scaledjob.go @@ -0,0 +1,10 @@ +package controller + +import ( + "github.com/kedacore/keda/pkg/controller/scaledjob" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, scaledjob.Add) +} diff --git a/pkg/controller/scaledjob/scaledjob_controller.go b/pkg/controller/scaledjob/scaledjob_controller.go new file mode 100644 index 00000000000..03e60fc591d --- /dev/null +++ b/pkg/controller/scaledjob/scaledjob_controller.go @@ -0,0 +1,144 @@ +package scaledjob + +import ( + "context" + + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + + "k8s.io/apimachinery/pkg/api/errors" + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + //"k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + //"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var log = logf.Log.WithName("controller_scaledjob") + +/** +* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller +* business logic. Delete these comments after modifying this file.* + */ + +// Add creates a new ScaledJob Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
+func Add(mgr manager.Manager) error { + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + return &ReconcileScaledJob{client: mgr.GetClient(), scheme: mgr.GetScheme()} +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("scaledjob-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to primary resource ScaledJob + err = c.Watch(&source.Kind{Type: &kedav1alpha1.ScaledJob{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + + // TODO(user): Modify this to be the types you create that are owned by the primary resource + // Watch for changes to secondary resource Pods and requeue the owner ScaledJob + // err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{ + // IsController: true, + // OwnerType: &kedav1alpha1.ScaledJob{}, + // }) + if err != nil { + return err + } + + return nil +} + +// blank assignment to verify that ReconcileScaledJob implements reconcile.Reconciler +var _ reconcile.Reconciler = &ReconcileScaledJob{} + +// ReconcileScaledJob reconciles a ScaledJob object +type ReconcileScaledJob struct { + // This client, initialized using mgr.Client() above, is a split client + // that reads objects from the cache and writes to the apiserver + client client.Client + scheme *runtime.Scheme +} + +// Reconcile reads that state of the cluster for a ScaledJob object and makes changes based on the state read +// and what is in the ScaledJob.Spec +// TODO(user): Modify this Reconcile function to implement your Controller logic. 
This example creates +// a Pod as an example +// Note: +// The Controller will requeue the Request to be processed again if the returned error is non-nil or +// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. +func (r *ReconcileScaledJob) Reconcile(request reconcile.Request) (reconcile.Result, error) { + reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) + reqLogger.Info("Reconciling ScaledJob") + + // Fetch the ScaledJob instance + instance := &kedav1alpha1.ScaledJob{} + err := r.client.Get(context.TODO(), request.NamespacedName, instance) + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, err + } + + reqLogger.Info("Reconciling ScaledJob is NOT IMPLEMENTED yet") + + return reconcile.Result{}, nil +} + +// FIXME use ScaledJob +// reconcileJobType implemets reconciler logic for K8s Jobs based ScaleObject +// func (r *ReconcileScaledObject) reconcileJobType(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) ( error) { +// // scaledObject.Spec.ScaleType = kedav1alpha1.ScaleTypeJob + +// // Delete Jobs owned by the previous version of the ScaledObject +// opts := []client.ListOption{ +// client.InNamespace(scaledObject.GetNamespace()), +// client.MatchingLabels(map[string]string{"scaledobject": scaledObject.GetName()}), +// } +// jobs := &batchv1.JobList{} +// err := r.client.List(context.TODO(), jobs, opts...) 
+// if err != nil { +// logger.Error(err, "Cannot get list of Jobs owned by this ScaledObject") +// return err +// } + +// if jobs.Size() > 0 { +// logger.Info("Deleting jobs owned by the previous version of the ScaledObject", "Number of jobs to delete", jobs.Size()) +// } +// for _, job := range jobs.Items { +// err = r.client.Delete(context.TODO(), &job, client.PropagationPolicy(metav1.DeletePropagationBackground)) +// if err != nil { +// logger.Error(err, "Not able to delete job", "Job", job.Name) +// return err +// } +// } + +// // ScaledObject was created or modified - let's start a new ScaleLoop +// err = r.startScaleLoop(logger, scaledObject) +// if err != nil { +// logger.Error(err, "Failed to start a new ScaleLoop") +// return err +// } + +// return nil +// } diff --git a/pkg/controller/scaledobject/hpa.go b/pkg/controller/scaledobject/hpa.go new file mode 100644 index 00000000000..7640a2512d0 --- /dev/null +++ b/pkg/controller/scaledobject/hpa.go @@ -0,0 +1,203 @@ +package scaledobject + +import ( + "context" + "fmt" + + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + kedacontrollerutil "github.com/kedacore/keda/pkg/controller/util" + + "github.com/go-logr/logr" + version "github.com/kedacore/keda/version" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + defaultHPAMinReplicas int32 = 1 + defaultHPAMaxReplicas int32 = 100 +) + +// createAndDeployNewHPA creates and deploy HPA in the cluster for specifed ScaledObject +func (r *ReconcileScaledObject) createAndDeployNewHPA(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, gvkr *kedav1alpha1.GroupVersionKindResource) error { + hpaName := getHPAName(scaledObject) + logger.Info("Creating a new HPA", "HPA.Namespace", scaledObject.Namespace, "HPA.Name", hpaName) + hpa, err := r.newHPAForScaledObject(logger, 
scaledObject, gvkr) + if err != nil { + logger.Error(err, "Failed to create new HPA resource", "HPA.Namespace", scaledObject.Namespace, "HPA.Name", hpaName) + return err + } + + // Set ScaledObject instance as the owner and controller + if err := controllerutil.SetControllerReference(scaledObject, hpa, r.scheme); err != nil { + return err + } + + err = r.client.Create(context.TODO(), hpa) + if err != nil { + logger.Error(err, "Failed to create new HPA in cluster", "HPA.Namespace", scaledObject.Namespace, "HPA.Name", hpaName) + return err + } + + return nil +} + +// newHPAForScaledObject returns HPA as it is specified in ScaledObject +func (r *ReconcileScaledObject) newHPAForScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, gvkr *kedav1alpha1.GroupVersionKindResource) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) { + scaledObjectMetricSpecs, err := r.getScaledObjectMetricSpecs(logger, scaledObject) + if err != nil { + return nil, err + } + + var behavior *autoscalingv2beta2.HorizontalPodAutoscalerBehavior + if r.kubeVersion.MinorVersion >= 18 && scaledObject.Spec.Advanced != nil && scaledObject.Spec.Advanced.HorizontalPodAutoscalerConfig != nil { + behavior = scaledObject.Spec.Advanced.HorizontalPodAutoscalerConfig.Behavior + } else { + behavior = nil + } + + // label can have max 63 chars + labelName := getHPAName(scaledObject) + if len(labelName) > 63 { + labelName = labelName[:63] + } + labels := map[string]string{ + "app.kubernetes.io/name": labelName, + "app.kubernetes.io/version": version.Version, + "app.kubernetes.io/part-of": scaledObject.Name, + "app.kubernetes.io/managed-by": "keda-operator", + } + + return &autoscalingv2beta2.HorizontalPodAutoscaler{ + Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{ + MinReplicas: getHPAMinReplicas(scaledObject), + MaxReplicas: getHPAMaxReplicas(scaledObject), + Metrics: scaledObjectMetricSpecs, + Behavior: behavior, + ScaleTargetRef: autoscalingv2beta2.CrossVersionObjectReference{ + 
Name: scaledObject.Spec.ScaleTargetRef.Name, + Kind: gvkr.Kind, + APIVersion: gvkr.GroupVersion().String(), + }}, + ObjectMeta: metav1.ObjectMeta{ + Name: getHPAName(scaledObject), + Namespace: scaledObject.Namespace, + Labels: labels, + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "v2beta2", + }, + }, nil +} + +// updateHPAIfNeeded checks whether update of HPA is needed +func (r *ReconcileScaledObject) updateHPAIfNeeded(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, foundHpa *autoscalingv2beta2.HorizontalPodAutoscaler, gvkr *kedav1alpha1.GroupVersionKindResource) error { + + hpa, err := r.newHPAForScaledObject(logger, scaledObject, gvkr) + if err != nil { + logger.Error(err, "Failed to create new HPA resource", "HPA.Namespace", scaledObject.Namespace, "HPA.Name", getHPAName(scaledObject)) + return err + } + + if !equality.Semantic.DeepDerivative(hpa.Spec, foundHpa.Spec) { + logger.V(1).Info("Found difference in the HPA spec accordint to ScaledObject", "currentHPA", foundHpa.Spec, "newHPA", hpa.Spec) + if r.client.Update(context.TODO(), foundHpa) != nil { + foundHpa.Spec = hpa.Spec + logger.Error(err, "Failed to update HPA", "HPA.Namespace", foundHpa.Namespace, "HPA.Name", foundHpa.Name) + return err + } + // check if scaledObject.spec.behavior was defined, because it is supported only on k8s >= 1.18 + r.checkMinK8sVersionforHPABehavior(logger, scaledObject) + + logger.Info("Updated HPA according to ScaledObject", "HPA.Namespace", foundHpa.Namespace, "HPA.Name", foundHpa.Name) + } + + return nil +} + +// getScaledObjectMetricSpecs returns MetricSpec for HPA, generater from Triggers defitinion in ScaledObject +func (r *ReconcileScaledObject) getScaledObjectMetricSpecs(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) ([]autoscalingv2beta2.MetricSpec, error) { + var scaledObjectMetricSpecs []autoscalingv2beta2.MetricSpec + var externalMetricNames []string + + scalers, err := r.scaleHandler.GetScalers(scaledObject) + if err != nil { + 
logger.Error(err, "Error getting scalers") + return nil, err + } + + // Handling the Resource metrics through KEDA + if scaledObject.Spec.Advanced != nil && scaledObject.Spec.Advanced.HorizontalPodAutoscalerConfig != nil { + metrics := getResourceMetrics(scaledObject.Spec.Advanced.HorizontalPodAutoscalerConfig.ResourceMetrics) + scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, metrics...) + } + + for _, scaler := range scalers { + metricSpecs := scaler.GetMetricSpecForScaling() + + // add the scaledObjectName label. This is how the MetricsAdapter will know which scaledobject a metric is for when the HPA queries it. + for _, metricSpec := range metricSpecs { + metricSpec.External.Metric.Selector = &metav1.LabelSelector{MatchLabels: make(map[string]string)} + metricSpec.External.Metric.Selector.MatchLabels["scaledObjectName"] = scaledObject.Name + externalMetricNames = append(externalMetricNames, metricSpec.External.Metric.Name) + } + scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, metricSpecs...) 
+ scaler.Close() + } + + // store External.MetricNames used by scalers defined in the ScaledObject + status := scaledObject.Status.DeepCopy() + status.ExternalMetricNames = externalMetricNames + err = kedacontrollerutil.UpdateScaledObjectStatus(r.client, logger, scaledObject, status) + if err != nil { + logger.Error(err, "Error updating scaledObject status with used externalMetricNames") + return nil, err + } + + return scaledObjectMetricSpecs, nil +} + +func getResourceMetrics(resourceMetrics []*autoscalingv2beta2.ResourceMetricSource) []autoscalingv2beta2.MetricSpec { + metrics := make([]autoscalingv2beta2.MetricSpec, 0, len(resourceMetrics)) + for _, resourceMetric := range resourceMetrics { + metrics = append(metrics, autoscalingv2beta2.MetricSpec{ + Type: "Resource", + Resource: resourceMetric, + }) + } + + return metrics +} + +// checkMinK8sVersionforHPABehavior min version (k8s v1.18) for HPA Behavior +func (r *ReconcileScaledObject) checkMinK8sVersionforHPABehavior(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) { + if r.kubeVersion.MinorVersion < 18 { + if scaledObject.Spec.Advanced != nil && scaledObject.Spec.Advanced.HorizontalPodAutoscalerConfig != nil && scaledObject.Spec.Advanced.HorizontalPodAutoscalerConfig.Behavior != nil { + logger.Info("Warning: Ignoring scaledObject.spec.behavior, it is only supported on kubernetes version >= 1.18", "kubernetes.version", r.kubeVersion.PrettyVersion) + } + } +} + +// getHPAName returns generated HPA name for ScaledObject specified in the parameter +func getHPAName(scaledObject *kedav1alpha1.ScaledObject) string { + return fmt.Sprintf("keda-hpa-%s", scaledObject.Name) +} + +// getHPAMinReplicas returns MinReplicas based on definition in ScaledObject or default value if not defined +func getHPAMinReplicas(scaledObject *kedav1alpha1.ScaledObject) *int32 { + if scaledObject.Spec.MinReplicaCount != nil && *scaledObject.Spec.MinReplicaCount > 0 { + return scaledObject.Spec.MinReplicaCount + } + tmp := 
defaultHPAMinReplicas + return &tmp +} + +// getHPAMaxReplicas returns MaxReplicas based on definition in ScaledObject or default value if not defined +func getHPAMaxReplicas(scaledObject *kedav1alpha1.ScaledObject) int32 { + if scaledObject.Spec.MaxReplicaCount != nil { + return *scaledObject.Spec.MaxReplicaCount + } + return defaultHPAMaxReplicas +} diff --git a/pkg/controller/scaledobject/scaledobject_controller.go b/pkg/controller/scaledobject/scaledobject_controller.go index 1702fdef5d5..9b25dedfb6e 100644 --- a/pkg/controller/scaledobject/scaledobject_controller.go +++ b/pkg/controller/scaledobject/scaledobject_controller.go @@ -5,22 +5,25 @@ import ( "fmt" "sync" - "github.com/go-logr/logr" kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" - scalehandler "github.com/kedacore/keda/pkg/handler" - version "github.com/kedacore/keda/version" + kedacontrollerutil "github.com/kedacore/keda/pkg/controller/util" + "github.com/kedacore/keda/pkg/scaling" + kedautil "github.com/kedacore/keda/pkg/util" - autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" - batchv1 "k8s.io/api/batch/v1" + "github.com/go-logr/logr" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/scale" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -29,22 +32,46 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -const ( - defaultHPAMinReplicas 
int32 = 1 - defaultHPAMaxReplicas int32 = 100 -) - var log = logf.Log.WithName("controller_scaledobject") // Add creates a new ScaledObject Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager) error { - return add(mgr, newReconciler(mgr)) + + clientset, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) + if err != nil { + return err + } + + // Find out Kubernetes version + var kubeVersion kedautil.K8sVersion + version, err := clientset.ServerVersion() + if err == nil { + kubeVersion = kedautil.NewK8sVersion(version) + log.Info("Running on Kubernetes " + kubeVersion.PrettyVersion) + } else { + log.Error(err, "Not able to get Kubernetes version") + } + + // Create Scale Client + scaleClient, err := initScaleClient(mgr, clientset) + if err != nil { + return err + } + return add(mgr, newReconciler(mgr, &scaleClient, kubeVersion)) } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileScaledObject{client: mgr.GetClient(), scheme: mgr.GetScheme(), scaleLoopContexts: &sync.Map{}, scaledObjectsGenerations: &sync.Map{}} +func newReconciler(mgr manager.Manager, scaleClient *scale.ScalesGetter, kubeVersion kedautil.K8sVersion) reconcile.Reconciler { + return &ReconcileScaledObject{ + client: mgr.GetClient(), + scaleClient: scaleClient, + restMapper: mgr.GetRESTMapper(), + scheme: mgr.GetScheme(), + scaledObjectsGenerations: &sync.Map{}, + scaleHandler: scaling.NewScaleHandler(mgr.GetClient(), scaleClient, mgr.GetScheme()), + kubeVersion: kubeVersion, + } } // add adds a new Controller to mgr with r as the reconcile.Reconciler @@ -58,19 +85,16 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { // Watch for changes to primary resource ScaledObject err = c.Watch(&source.Kind{Type: &kedav1alpha1.ScaledObject{}}, &handler.EnqueueRequestForObject{}, - predicate.Funcs{ - 
UpdateFunc: func(e event.UpdateEvent) bool { - // Ignore updates to ScaledObject Status (in this case metadata.Generation does not change) - // so reconcile loop is not started on Status updates - return e.MetaOld.GetGeneration() != e.MetaNew.GetGeneration() - }, - }) + // Ignore updates to ScaledObject Status (in this case metadata.Generation does not change) + // so reconcile loop is not started on Status updates + predicate.GenerationChangedPredicate{}, + ) if err != nil { return err } // Watch for changes to secondary resource HPA and requeue the owner ScaledObject - err = c.Watch(&source.Kind{Type: &autoscalingv2beta1.HorizontalPodAutoscaler{}}, &handler.EnqueueRequestForOwner{ + err = c.Watch(&source.Kind{Type: &autoscalingv2beta2.HorizontalPodAutoscaler{}}, &handler.EnqueueRequestForOwner{ IsController: true, OwnerType: &kedav1alpha1.ScaledObject{}, }) @@ -80,6 +104,16 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return nil } +func initScaleClient(mgr manager.Manager, clientset *discovery.DiscoveryClient) (scale.ScalesGetter, error) { + + scaleKindResolver := scale.NewDiscoveryScaleKindResolver(clientset) + return scale.New( + clientset.RESTClient(), mgr.GetRESTMapper(), + dynamic.LegacyAPIPathResolverFunc, + scaleKindResolver, + ), nil +} + // blank assignment to verify that ReconcileScaledObject implements reconcile.Reconciler var _ reconcile.Reconciler = &ReconcileScaledObject{} @@ -88,9 +122,12 @@ type ReconcileScaledObject struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and writes to the apiserver client client.Client + scaleClient *scale.ScalesGetter + restMapper meta.RESTMapper scheme *runtime.Scheme - scaleLoopContexts *sync.Map scaledObjectsGenerations *sync.Map + scaleHandler scaling.ScaleHandler + kubeVersion kedautil.K8sVersion } // Reconcile reads that state of the cluster for a ScaledObject object and makes changes based on the state read @@ -100,7 +137,6 
@@ type ReconcileScaledObject struct { // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. func (r *ReconcileScaledObject) Reconcile(request reconcile.Request) (reconcile.Result, error) { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) - reqLogger.Info("Reconciling ScaledObject") // Fetch the ScaledObject instance scaledObject := &kedav1alpha1.ScaledObject{} @@ -117,218 +153,185 @@ func (r *ReconcileScaledObject) Reconcile(request reconcile.Request) (reconcile. return reconcile.Result{}, err } + reqLogger.Info("Reconciling ScaledObject") + // Check if the ScaledObject instance is marked to be deleted, which is // indicated by the deletion timestamp being set. - isScaledObjectMarkedToBeDeleted := scaledObject.GetDeletionTimestamp() != nil - if isScaledObjectMarkedToBeDeleted { - if contains(scaledObject.GetFinalizers(), scaledObjectFinalizer) { - // Run finalization logic for scaledObjectFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - if err := r.finalizeScaledObject(reqLogger, scaledObject); err != nil { - return reconcile.Result{}, err - } - - // Remove scaledObjectFinalizer. Once all finalizers have been - // removed, the object will be deleted. 
- scaledObject.SetFinalizers(remove(scaledObject.GetFinalizers(), scaledObjectFinalizer)) - err := r.client.Update(context.TODO(), scaledObject) - if err != nil { - return reconcile.Result{}, err - } - } - return reconcile.Result{}, nil + if scaledObject.GetDeletionTimestamp() != nil { + return reconcile.Result{}, r.finalizeScaledObject(reqLogger, scaledObject) } - // Add finalizer for this CR - if !contains(scaledObject.GetFinalizers(), scaledObjectFinalizer) { - if err := r.addFinalizer(reqLogger, scaledObject); err != nil { - return reconcile.Result{}, err - } + // ensure finalizer is set on this CR + if err := r.ensureFinalizer(reqLogger, scaledObject); err != nil { + return reconcile.Result{}, err } - reqLogger.V(1).Info("Detecting ScaleType from ScaledObject") - var errMsg string - if scaledObject.Spec.ScaleTargetRef != nil { - if scaledObject.Spec.JobTargetRef == nil { - reqLogger.Info("Detected ScaleType = Deployment") - return r.reconcileDeploymentType(reqLogger, scaledObject) - } - errMsg = "Both ScaledObject.Spec.ScaleTargetRef and ScaledObject.Spec.JobTargetRef cannot be set at the same time" - } else if scaledObject.Spec.JobTargetRef != nil { - reqLogger.Info("Detected ScaleType = Job") - return r.reconcileJobType(reqLogger, scaledObject) - } else { - errMsg = "ScaledObject.Spec.ScaleTargetRef or ScaledObject.Spec.JobTargetRef is not set" + // ensure Status Conditions are initialized + if !scaledObject.Status.Conditions.AreInitialized() { + conditions := kedav1alpha1.GetInitializedConditions() + kedacontrollerutil.SetStatusConditions(r.client, reqLogger, scaledObject, conditions) } - if errMsg == "" { - errMsg = "Unknown error while detecting ScaleType" + + // reconcile ScaledObject and set status appropriately + msg, err := r.reconcileScaledObject(reqLogger, scaledObject) + conditions := scaledObject.Status.Conditions.DeepCopy() + if err != nil { + reqLogger.Error(err, msg) + conditions.SetReadyCondition(metav1.ConditionFalse, 
"ScaledObjectCheckFailed", msg) + conditions.SetActiveCondition(metav1.ConditionUnknown, "UnkownState", "ScaledObject check failed") + } else { + reqLogger.V(1).Info(msg) + conditions.SetReadyCondition(metav1.ConditionTrue, "ScaledObjectReady", msg) } - err = fmt.Errorf(errMsg) - reqLogger.Error(err, "Failed to detect ScaleType") + kedacontrollerutil.SetStatusConditions(r.client, reqLogger, scaledObject, &conditions) return reconcile.Result{}, err } -// reconcileJobType implemets reconciler logic for K8s Jobs based ScaleObject -func (r *ReconcileScaledObject) reconcileJobType(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (reconcile.Result, error) { - scaledObject.Spec.ScaleType = kedav1alpha1.ScaleTypeJob +// reconcileScaledObject implements reconciler logic for ScaleObject +func (r *ReconcileScaledObject) reconcileScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (string, error) { - // Delete Jobs owned by the previous version of the ScaledObject - opts := []client.ListOption{ - client.InNamespace(scaledObject.GetNamespace()), - client.MatchingLabels(map[string]string{"scaledobject": scaledObject.GetName()}), - } - jobs := &batchv1.JobList{} - err := r.client.List(context.TODO(), jobs, opts...) 
- if err != nil { - logger.Error(err, "Cannot get list of Jobs owned by this ScaledObject") - return reconcile.Result{}, err + // Check scale target Name is specified + if scaledObject.Spec.ScaleTargetRef.Name == "" { + err := fmt.Errorf("ScaledObject.spec.scaleTargetRef.name is missing") + return "ScaledObject doesn't have correct scaleTargetRef specification", err } - if jobs.Size() > 0 { - logger.Info("Deleting jobs owned by the previous version of the ScaledObject", "Number of jobs to delete", jobs.Size()) - } - for _, job := range jobs.Items { - err = r.client.Delete(context.TODO(), &job, client.PropagationPolicy(metav1.DeletePropagationBackground)) - if err != nil { - logger.Error(err, "Not able to delete job", "Job", job.Name) - return reconcile.Result{}, err - } - } - - // ScaledObject was created or modified - let's start a new ScaleLoop - err = r.startScaleLoop(logger, scaledObject) + // Check the label needed for Metrics servers is present on ScaledObject + err := r.ensureScaledObjectLabel(logger, scaledObject) if err != nil { - logger.Error(err, "Failed to start a new ScaleLoop") - return reconcile.Result{}, err + return "Failed to update ScaledObject with scaledObjectName label", err } - return reconcile.Result{}, nil -} - -// reconcileDeploymentType implements reconciler logic for Deployment based ScaleObject -func (r *ReconcileScaledObject) reconcileDeploymentType(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (reconcile.Result, error) { - scaledObject.Spec.ScaleType = kedav1alpha1.ScaleTypeDeployment - - deploymentName, err := checkDeploymentTypeScaledObject(scaledObject) + // Check if resource targeted for scaling exists and exposes /scale subresource + gvkr, err := r.checkTargetResourceIsScalable(logger, scaledObject) if err != nil { - logger.Error(err, "Notified about ScaledObject with incorrect deploymentName specification") - return reconcile.Result{}, err + return "ScaledObject doesn't have correct scaleTargetRef specification", 
err } - // add deploymentName label if needed - err = r.checkScaledObjectLabel(logger, scaledObject) + // Create a new HPA or update existing one according to ScaledObject + newHPACreated, err := r.ensureHPAForScaledObjectExists(logger, scaledObject, &gvkr) if err != nil { - logger.Error(err, "Failed to update ScaledObject with deploymentName label") - return reconcile.Result{}, err - } - - hpaName := getHpaName(deploymentName) - hpaNamespace := scaledObject.Namespace - - // Check if this HPA already exists - foundHpa := &autoscalingv2beta1.HorizontalPodAutoscaler{} - err = r.client.Get(context.TODO(), types.NamespacedName{Name: hpaName, Namespace: hpaNamespace}, foundHpa) - if err != nil && errors.IsNotFound(err) { - logger.Info("Creating a new HPA", "HPA.Namespace", hpaNamespace, "HPA.Name", hpaName) - hpa, err := r.newHPAForScaledObject(logger, scaledObject) + return "Failed to ensure HPA is correctly created for ScaledObject", err + } + scaleObjectSpecChanged := false + if !newHPACreated { + // Lets Check whether ScaledObject generation was changed, ie. 
there were changes in ScaledObject.Spec + // if it was changed we should start a new ScaleLoop + // (we can omit this check if a new HPA was created, which fires new ScaleLoop anyway) + scaleObjectSpecChanged, err = r.scaledObjectGenerationChanged(logger, scaledObject) if err != nil { - logger.Error(err, "Failed to create new HPA resource", "HPA.Namespace", hpaNamespace, "HPA.Name", hpaName) - return reconcile.Result{}, err + return "Failed to check whether ScaledObject's Generation was changed", err } + } - // Set ScaledObject instance as the owner and controller - if err := controllerutil.SetControllerReference(scaledObject, hpa, r.scheme); err != nil { - return reconcile.Result{}, err + // Notify ScaleHandler if a new HPA was created or if ScaledObject was updated + if newHPACreated || scaleObjectSpecChanged { + if r.requestScaleLoop(logger, scaledObject) != nil { + return "Failed to start a new scale loop with scaling logic", err + } else { + logger.Info("Initializing Scaling logic according to ScaledObject Specification") } + } - err = r.client.Create(context.TODO(), hpa) - if err != nil { - logger.Error(err, "Failed to create new HPA in cluster", "HPA.Namespace", hpaNamespace, "HPA.Name", hpaName) - return reconcile.Result{}, err - } + return "ScaledObject is defined correctly and is ready for scaling", nil +} - // ScaledObject was created - let's start a new ScaleLoop - err = r.startScaleLoop(logger, scaledObject) - if err != nil { - logger.Error(err, "Failed to start a new ScaleLoop") - return reconcile.Result{}, err - } +// ensureScaledObjectLabel ensures that scaledObjectName= label exist in the ScaledObject +// This is how the MetricsAdapter will know which ScaledObject a metric is for when the HPA queries it. 
+func (r *ReconcileScaledObject) ensureScaledObjectLabel(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { + const labelScaledObjectName = "scaledObjectName" - // HPA created successfully & ScaleLoop started - don't requeue - return reconcile.Result{}, nil - } else if err != nil { - logger.Error(err, "Failed to get HPA") - return reconcile.Result{}, err + if scaledObject.Labels == nil { + scaledObject.Labels = map[string]string{labelScaledObjectName: scaledObject.Name} + } else { + value, found := scaledObject.Labels[labelScaledObjectName] + if found && value == scaledObject.Name { + return nil + } + scaledObject.Labels[labelScaledObjectName] = scaledObject.Name } - // Update hpa HPA if needed - updateHpa, err := r.checkHPAForUpdate(logger, scaledObject, foundHpa, deploymentName) + logger.V(1).Info("Adding scaledObjectName label on ScaledObject", "value", scaledObject.Name) + return r.client.Update(context.TODO(), scaledObject) +} + +// checkTargetResourceIsScalable checks if resource targeted for scaling exists and exposes /scale subresource +func (r *ReconcileScaledObject) checkTargetResourceIsScalable(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) { + gvkr, err := kedautil.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.ApiVersion, scaledObject.Spec.ScaleTargetRef.Kind) if err != nil { - logger.Error(err, "Failed to check HPA for possible update") - return reconcile.Result{}, err - } - if updateHpa { - err = r.client.Update(context.TODO(), foundHpa) - if err != nil { - logger.Error(err, "Failed to update HPA", "HPA.Namespace", foundHpa.Namespace, "HPA.Name", foundHpa.Name) - return reconcile.Result{}, err + logger.Error(err, "Failed to parse Group, Version, Kind, Resource", "apiVersion", scaledObject.Spec.ScaleTargetRef.ApiVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind) + return gvkr, err + } + gvkString := gvkr.GVKString() + logger.V(1).Info("Parsed Group, Version, 
Kind, Resource", "GVK", gvkString, "Resource", gvkr.Resource) + + // let's try to detect /scale subresource + _, errScale := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), gvkr.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) + if errScale != nil { + // not able to get /scale subresource -> let's check if the resource even exist in the cluster + unstruct := &unstructured.Unstructured{} + unstruct.SetGroupVersionKind(gvkr.GroupVersionKind()) + if err := r.client.Get(context.TODO(), client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil { + // resource doesn't exist + logger.Error(err, "Target resource doesn't exist", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name) + return gvkr, err + } else { + // resource exist but doesn't expose /scale subresource + logger.Error(errScale, "Target resource doesn't expose /scale subresource", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name) + return gvkr, errScale } - logger.Info("Updated HPA according to ScaledObject", "HPA.Namespace", hpaNamespace, "HPA.Name", hpaName) } - // Let's start a new ScaleLoop if ScaledObject's Generation was changed - updateNeeded, err := r.scaledObjectGenerationChanged(logger, scaledObject) - if err != nil { - logger.Error(err, "Failed to check ScaledObject's Generation change") - return reconcile.Result{}, err - } - if updateNeeded { - err = r.startScaleLoop(logger, scaledObject) - if err != nil { - logger.Error(err, "Failed to start a new ScaleLoop") - return reconcile.Result{}, err + // store discovered GVK and GVKR into the Status if it is not present already + if scaledObject.Status.ScaleTargetKind != gvkString { + status := scaledObject.Status.DeepCopy() + status.ScaleTargetKind = gvkString + status.ScaleTargetGVKR = &gvkr + if err := kedacontrollerutil.UpdateScaledObjectStatus(r.client, logger, scaledObject, status); err != nil { + return 
gvkr, err } + logger.Info("Detected resource targeted for scaling", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name) } - return reconcile.Result{}, nil + return gvkr, nil } -func checkDeploymentTypeScaledObject(scaledObject *kedav1alpha1.ScaledObject) (string, error) { - var err error - var errMsg string +// ensureHPAForScaledObjectExists ensures that in cluster exist up-to-date HPA for specified ScaledObject, returns true if a new HPA was created +func (r *ReconcileScaledObject) ensureHPAForScaledObjectExists(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, gvkr *kedav1alpha1.GroupVersionKindResource) (bool, error) { + hpaName := getHPAName(scaledObject) + foundHpa := &autoscalingv2beta2.HorizontalPodAutoscaler{} + // Check if HPA for this ScaledObject already exists + err := r.client.Get(context.TODO(), types.NamespacedName{Name: hpaName, Namespace: scaledObject.Namespace}, foundHpa) + if err != nil && errors.IsNotFound(err) { + // HPA wasn't found -> let's create a new one + err = r.createAndDeployNewHPA(logger, scaledObject, gvkr) + if err != nil { + return false, err + } - deploymentName := scaledObject.Spec.ScaleTargetRef.DeploymentName + // check if scaledObject.spec.behavior was defined, because it is supported only on k8s >= 1.18 + r.checkMinK8sVersionforHPABehavior(logger, scaledObject) - if deploymentName == "" { - errMsg = "ScaledObject.spec.scaleTargetRef.deploymentName is missing" - err = fmt.Errorf(errMsg) + // new HPA created successfully -> notify Reconcile function so it could fire a new ScaleLoop + return true, nil + } else if err != nil { + logger.Error(err, "Failed to get HPA from cluster") + return false, err } - return deploymentName, err -} - -func (r *ReconcileScaledObject) checkScaledObjectLabel(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { - if scaledObject.Labels == nil { - scaledObject.Labels = map[string]string{"deploymentName": scaledObject.Spec.ScaleTargetRef.DeploymentName} - } 
else { - value, found := scaledObject.Labels["deploymentName"] - if found && value == scaledObject.Spec.ScaleTargetRef.DeploymentName { - return nil - } - scaledObject.Labels["deploymentName"] = scaledObject.Spec.ScaleTargetRef.DeploymentName + // HPA was found -> let's check if we need to update it + err = r.updateHPAIfNeeded(logger, scaledObject, foundHpa, gvkr) + if err != nil { + logger.Error(err, "Failed to check HPA for possible update") + return false, err } - logger.V(1).Info("Adding deploymentName label on ScaledObject") - return r.client.Update(context.TODO(), scaledObject) + return false, nil } // startScaleLoop starts ScaleLoop handler for the respective ScaledObject -func (r *ReconcileScaledObject) startScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { - - logger.V(1).Info("Starting a new ScaleLoop") - - scaleHandler := scalehandler.NewScaleHandler(r.client, r.scheme) +func (r *ReconcileScaledObject) requestScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { + logger.V(1).Info("Notify scaleHandler of an update in scaledObject") key, err := cache.MetaNamespaceKeyFunc(scaledObject) if err != nil { @@ -336,25 +339,33 @@ func (r *ReconcileScaledObject) startScaleLoop(logger logr.Logger, scaledObject return err } + if err = r.scaleHandler.HandleScalableObject(scaledObject); err != nil { + return err + } + // store ScaledObject's current Generation r.scaledObjectsGenerations.Store(key, scaledObject.Generation) - ctx, cancel := context.WithCancel(context.TODO()) + return nil +} - // cancel the outdated ScaleLoop for the same ScaledObject (if exists) - value, loaded := r.scaleLoopContexts.LoadOrStore(key, cancel) - if loaded { - cancelValue, ok := value.(context.CancelFunc) - if ok { - cancelValue() - } - r.scaleLoopContexts.Store(key, cancel) +// stopScaleLoop stops ScaleLoop handler for the respective ScaleObject +func (r *ReconcileScaledObject) stopScaleLoop(logger logr.Logger, scaledObject 
*kedav1alpha1.ScaledObject) error { + key, err := cache.MetaNamespaceKeyFunc(scaledObject) + if err != nil { + logger.Error(err, "Error getting key for scaledObject") + return err } - go scaleHandler.HandleScaleLoop(ctx, scaledObject) + if err := r.scaleHandler.DeleteScalableObject(scaledObject); err != nil { + return err + } + // delete ScaledObject's current Generation + r.scaledObjectsGenerations.Delete(key) return nil } +// scaledObjectGenerationChanged returns true if ScaledObject's Generation was changed, ie. ScaledObject.Spec was changed func (r *ReconcileScaledObject) scaledObjectGenerationChanged(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (bool, error) { key, err := cache.MetaNamespaceKeyFunc(scaledObject) if err != nil { @@ -371,132 +382,3 @@ func (r *ReconcileScaledObject) scaledObjectGenerationChanged(logger logr.Logger } return true, nil } - -// newHPAForScaledObject returns HPA as it is specified in ScaledObject -func (r *ReconcileScaledObject) newHPAForScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) { - deploymentName := scaledObject.Spec.ScaleTargetRef.DeploymentName - scaledObjectMetricSpecs, err := r.getScaledObjectMetricSpecs(logger, scaledObject, deploymentName) - - // label can have max 63 chars - labelName := "" - if len(getHpaName(deploymentName)) > 63 { - labelName = getHpaName(deploymentName)[:63] - } else { - labelName = getHpaName(deploymentName) - } - labels := map[string]string{ - "app.kubernetes.io/name": labelName, - "app.kubernetes.io/version": version.Version, - "app.kubernetes.io/part-of": scaledObject.GetName(), - "app.kubernetes.io/managed-by": "keda-operator", - } - - if err != nil { - return nil, err - } - - return &autoscalingv2beta1.HorizontalPodAutoscaler{ - Spec: autoscalingv2beta1.HorizontalPodAutoscalerSpec{ - MinReplicas: getHpaMinReplicas(scaledObject), - MaxReplicas: getHpaMaxReplicas(scaledObject), - Metrics: 
scaledObjectMetricSpecs, - ScaleTargetRef: autoscalingv2beta1.CrossVersionObjectReference{ - Name: deploymentName, - Kind: "Deployment", - APIVersion: "apps/v1", - }}, - ObjectMeta: metav1.ObjectMeta{ - Name: getHpaName(deploymentName), - Namespace: scaledObject.Namespace, - Labels: labels, - }, - TypeMeta: metav1.TypeMeta{ - APIVersion: "v2beta1", - }, - }, nil -} - -// checkHPAForUpdate checks whether update of HPA is needed -func (r *ReconcileScaledObject) checkHPAForUpdate(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, foundHpa *autoscalingv2beta1.HorizontalPodAutoscaler, deploymentName string) (bool, error) { - updateHPA := false - scaledObjectMinReplicaCount := getHpaMinReplicas(scaledObject) - if *foundHpa.Spec.MinReplicas != *scaledObjectMinReplicaCount { - updateHPA = true - foundHpa.Spec.MinReplicas = scaledObjectMinReplicaCount - } - - scaledObjectMaxReplicaCount := getHpaMaxReplicas(scaledObject) - if foundHpa.Spec.MaxReplicas != scaledObjectMaxReplicaCount { - updateHPA = true - foundHpa.Spec.MaxReplicas = scaledObjectMaxReplicaCount - } - - newMetricSpec, err := r.getScaledObjectMetricSpecs(logger, scaledObject, deploymentName) - if err != nil { - logger.Error(err, "Failed to create MetricSpec") - return true, err - } - if fmt.Sprintf("%v", foundHpa.Spec.Metrics) != fmt.Sprintf("%v", newMetricSpec) { - updateHPA = true - foundHpa.Spec.Metrics = newMetricSpec - } - - return updateHPA, nil -} - -// getScaledObjectMetricSpecs returns MetricSpec for HPA, generater from Triggers defitinion in ScaledObject -func (r *ReconcileScaledObject) getScaledObjectMetricSpecs(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, deploymentName string) ([]autoscalingv2beta1.MetricSpec, error) { - var scaledObjectMetricSpecs []autoscalingv2beta1.MetricSpec - var externalMetricNames []string - - scalers, _, err := scalehandler.NewScaleHandler(r.client, r.scheme).GetDeploymentScalers(scaledObject) - if err != nil { - logger.Error(err, "Error getting 
scalers") - return nil, err - } - - for _, scaler := range scalers { - metricSpecs := scaler.GetMetricSpecForScaling() - - // add the deploymentName label. This is how the MetricsAdapter will know which scaledobject a metric is for when the HPA queries it. - for _, metricSpec := range metricSpecs { - metricSpec.External.MetricSelector = &metav1.LabelSelector{MatchLabels: make(map[string]string)} - metricSpec.External.MetricSelector.MatchLabels["deploymentName"] = deploymentName - externalMetricNames = append(externalMetricNames, metricSpec.External.MetricName) - } - scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, metricSpecs...) - scaler.Close() - } - - // store External.MetricNames used by scalers defined in the ScaledObject - scaledObject.Status.ExternalMetricNames = externalMetricNames - err = r.client.Status().Update(context.TODO(), scaledObject) - if err != nil { - logger.Error(err, "Error updating scaledObject status with used externalMetricNames") - return nil, err - } - - return scaledObjectMetricSpecs, nil -} - -// getHpaName returns generated HPA name for DeploymentName specified in the parameter -func getHpaName(deploymentName string) string { - return fmt.Sprintf("keda-hpa-%s", deploymentName) -} - -// getHpaMinReplicas returns MinReplicas based on definition in ScaledObject or default value if not defined -func getHpaMinReplicas(scaledObject *kedav1alpha1.ScaledObject) *int32 { - if scaledObject.Spec.MinReplicaCount != nil && *scaledObject.Spec.MinReplicaCount > 0 { - return scaledObject.Spec.MinReplicaCount - } - tmp := defaultHPAMinReplicas - return &tmp -} - -// getHpaMaxReplicas returns MaxReplicas based on definition in ScaledObject or default value if not defined -func getHpaMaxReplicas(scaledObject *kedav1alpha1.ScaledObject) int32 { - if scaledObject.Spec.MaxReplicaCount != nil { - return *scaledObject.Spec.MaxReplicaCount - } - return defaultHPAMaxReplicas -} diff --git a/pkg/controller/scaledobject/scaledobject_finalizer.go 
b/pkg/controller/scaledobject/scaledobject_finalizer.go index d2ff73bb286..179a2a18ce5 100644 --- a/pkg/controller/scaledobject/scaledobject_finalizer.go +++ b/pkg/controller/scaledobject/scaledobject_finalizer.go @@ -3,52 +3,52 @@ package scaledobject import ( "context" - "github.com/go-logr/logr" - "k8s.io/client-go/tools/cache" - kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + + "github.com/go-logr/logr" ) const ( - scaledObjectFinalizer = "finalizer.keda.k8s.io" + scaledObjectFinalizer = "finalizer.keda.sh" ) -// finalizeScaledObject is stopping ScaleLoop for the respective ScaleObject +// finalizeScaledObject runs finalization logic on ScaledObject if there's finalizer func (r *ReconcileScaledObject) finalizeScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { - key, err := cache.MetaNamespaceKeyFunc(scaledObject) - if err != nil { - logger.Error(err, "Error getting key for scaledObject (%s/%s)", scaledObject.GetNamespace(), scaledObject.GetName()) - return err - } - // store ScaledObject's current Generation - r.scaledObjectsGenerations.Delete(key) + if contains(scaledObject.GetFinalizers(), scaledObjectFinalizer) { + // Run finalization logic for scaledObjectFinalizer. If the + // finalization logic fails, don't remove the finalizer so + // that we can retry during the next reconciliation. + if err := r.stopScaleLoop(logger, scaledObject); err != nil { + return err + } - result, ok := r.scaleLoopContexts.Load(key) - if ok { - cancel, ok := result.(context.CancelFunc) - if ok { - cancel() + // Remove scaledObjectFinalizer. Once all finalizers have been + // removed, the object will be deleted. 
+ scaledObject.SetFinalizers(remove(scaledObject.GetFinalizers(), scaledObjectFinalizer)) + if err := r.client.Update(context.TODO(), scaledObject); err != nil { + logger.Error(err, "Failed to update ScaledObject after removing a finalizer", "finalizer", scaledObjectFinalizer) + return err } - r.scaleLoopContexts.Delete(key) - } else { - logger.V(1).Info("ScaleObject was not found in controller cache", "key", key) } logger.Info("Successfully finalized ScaledObject") return nil } -// addFinalizer adds finalizer to the ScaledObject -func (r *ReconcileScaledObject) addFinalizer(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { - logger.Info("Adding Finalizer for the ScaledObject") - scaledObject.SetFinalizers(append(scaledObject.GetFinalizers(), scaledObjectFinalizer)) +// ensureFinalizer check there is finalizer present on the ScaledObject, if not it adds one +func (r *ReconcileScaledObject) ensureFinalizer(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { - // Update CR - err := r.client.Update(context.TODO(), scaledObject) - if err != nil { - logger.Error(err, "Failed to update ScaledObject with finalizer") - return err + if !contains(scaledObject.GetFinalizers(), scaledObjectFinalizer) { + logger.Info("Adding Finalizer for the ScaledObject") + scaledObject.SetFinalizers(append(scaledObject.GetFinalizers(), scaledObjectFinalizer)) + + // Update CR + err := r.client.Update(context.TODO(), scaledObject) + if err != nil { + logger.Error(err, "Failed to update ScaledObject with a finalizer", "finalizer", scaledObjectFinalizer) + return err + } } return nil } diff --git a/pkg/controller/util/util.go b/pkg/controller/util/util.go new file mode 100644 index 00000000000..dbd5e63dde3 --- /dev/null +++ b/pkg/controller/util/util.go @@ -0,0 +1,46 @@ +package util + +import ( + "context" + "fmt" + + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + 
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +func SetStatusConditions(client runtimeclient.Client, logger logr.Logger, object interface{}, conditions *kedav1alpha1.Conditions) error { + var patch runtimeclient.Patch + + runtimeObj := object.(runtime.Object) + switch obj := runtimeObj.(type) { + case *kedav1alpha1.ScaledObject: + patch = runtimeclient.MergeFrom(obj.DeepCopy()) + obj.Status.Conditions = *conditions + case *kedav1alpha1.ScaledJob: + patch = runtimeclient.MergeFrom(obj.DeepCopy()) + obj.Status.Conditions = *conditions + default: + err := fmt.Errorf("Unknown scalable object type %v", obj) + logger.Error(err, "Failed to patch Objects Status with Conditions") + return err + } + + err := client.Status().Patch(context.TODO(), runtimeObj, patch) + if err != nil { + logger.Error(err, "Failed to patch Objects Status with Conditions") + } + return err +} + +func UpdateScaledObjectStatus(client runtimeclient.Client, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, status *kedav1alpha1.ScaledObjectStatus) error { + patch := runtimeclient.MergeFrom(scaledObject.DeepCopy()) + scaledObject.Status = *status + err := client.Status().Patch(context.TODO(), scaledObject, patch) + if err != nil { + logger.Error(err, "Failed to patch ScaledObjects Status") + } + return err +} diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go new file mode 100644 index 00000000000..3cb0c91f18d --- /dev/null +++ b/pkg/generated/clientset/versioned/clientset.go @@ -0,0 +1,96 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + kedav1alpha1 "github.com/kedacore/keda/pkg/generated/clientset/versioned/typed/keda/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + KedaV1alpha1() kedav1alpha1.KedaV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + kedaV1alpha1 *kedav1alpha1.KedaV1alpha1Client +} + +// KedaV1alpha1 retrieves the KedaV1alpha1Client +func (c *Clientset) KedaV1alpha1() kedav1alpha1.KedaV1alpha1Interface { + return c.kedaV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.kedaV1alpha1, err = kedav1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.kedaV1alpha1 = kedav1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.kedaV1alpha1 = kedav1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/pkg/generated/clientset/versioned/doc.go b/pkg/generated/clientset/versioned/doc.go new file mode 100644 index 00000000000..54893d3eed3 --- /dev/null +++ b/pkg/generated/clientset/versioned/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 00000000000..f56226cd03b --- /dev/null +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,81 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + clientset "github.com/kedacore/keda/pkg/generated/clientset/versioned" + kedav1alpha1 "github.com/kedacore/keda/pkg/generated/clientset/versioned/typed/keda/v1alpha1" + fakekedav1alpha1 "github.com/kedacore/keda/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var _ clientset.Interface = &Clientset{} + +// KedaV1alpha1 retrieves the KedaV1alpha1Client +func (c *Clientset) KedaV1alpha1() kedav1alpha1.KedaV1alpha1Interface { + return &fakekedav1alpha1.FakeKedaV1alpha1{Fake: &c.Fake} +} diff --git a/pkg/generated/clientset/versioned/fake/doc.go b/pkg/generated/clientset/versioned/fake/doc.go new file mode 100644 index 00000000000..ae870132280 --- /dev/null +++ b/pkg/generated/clientset/versioned/fake/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go new file mode 100644 index 00000000000..63ee3f56c19 --- /dev/null +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + kedav1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/pkg/generated/clientset/versioned/scheme/doc.go b/pkg/generated/clientset/versioned/scheme/doc.go new file mode 100644 index 00000000000..99cfd499649 --- /dev/null +++ b/pkg/generated/clientset/versioned/scheme/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go new file mode 100644 index 00000000000..4fd3772086a --- /dev/null +++ b/pkg/generated/clientset/versioned/scheme/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + kedav1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/doc.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/doc.go new file mode 100644 index 00000000000..aade9d0df7d --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/doc.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/doc.go new file mode 100644 index 00000000000..6c063b8e00f --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_keda_client.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_keda_client.go new file mode 100644 index 00000000000..40ff2d3dd99 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_keda_client.go @@ -0,0 +1,47 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/kedacore/keda/pkg/generated/clientset/versioned/typed/keda/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeKedaV1alpha1 struct { + *testing.Fake +} + +func (c *FakeKedaV1alpha1) ScaledJobs(namespace string) v1alpha1.ScaledJobInterface { + return &FakeScaledJobs{c, namespace} +} + +func (c *FakeKedaV1alpha1) ScaledObjects(namespace string) v1alpha1.ScaledObjectInterface { + return &FakeScaledObjects{c, namespace} +} + +func (c *FakeKedaV1alpha1) TriggerAuthentications(namespace string) v1alpha1.TriggerAuthenticationInterface { + return &FakeTriggerAuthentications{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeKedaV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledjob.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledjob.go new file mode 100644 index 00000000000..7a98f5f907c --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledjob.go @@ -0,0 +1,141 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeScaledJobs implements ScaledJobInterface +type FakeScaledJobs struct { + Fake *FakeKedaV1alpha1 + ns string +} + +var scaledjobsResource = schema.GroupVersionResource{Group: "keda.sh", Version: "v1alpha1", Resource: "scaledjobs"} + +var scaledjobsKind = schema.GroupVersionKind{Group: "keda.sh", Version: "v1alpha1", Kind: "ScaledJob"} + +// Get takes name of the scaledJob, and returns the corresponding scaledJob object, and an error if there is any. 
+func (c *FakeScaledJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ScaledJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(scaledjobsResource, c.ns, name), &v1alpha1.ScaledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledJob), err +} + +// List takes label and field selectors, and returns the list of ScaledJobs that match those selectors. +func (c *FakeScaledJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ScaledJobList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(scaledjobsResource, scaledjobsKind, c.ns, opts), &v1alpha1.ScaledJobList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ScaledJobList{ListMeta: obj.(*v1alpha1.ScaledJobList).ListMeta} + for _, item := range obj.(*v1alpha1.ScaledJobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested scaledJobs. +func (c *FakeScaledJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(scaledjobsResource, c.ns, opts)) + +} + +// Create takes the representation of a scaledJob and creates it. Returns the server's representation of the scaledJob, and an error, if there is any. +func (c *FakeScaledJobs) Create(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.CreateOptions) (result *v1alpha1.ScaledJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(scaledjobsResource, c.ns, scaledJob), &v1alpha1.ScaledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledJob), err +} + +// Update takes the representation of a scaledJob and updates it. 
Returns the server's representation of the scaledJob, and an error, if there is any. +func (c *FakeScaledJobs) Update(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.UpdateOptions) (result *v1alpha1.ScaledJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(scaledjobsResource, c.ns, scaledJob), &v1alpha1.ScaledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledJob), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeScaledJobs) UpdateStatus(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.UpdateOptions) (*v1alpha1.ScaledJob, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(scaledjobsResource, "status", c.ns, scaledJob), &v1alpha1.ScaledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledJob), err +} + +// Delete takes name of the scaledJob and deletes it. Returns an error if one occurs. +func (c *FakeScaledJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(scaledjobsResource, c.ns, name), &v1alpha1.ScaledJob{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeScaledJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(scaledjobsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ScaledJobList{}) + return err +} + +// Patch applies the patch and returns the patched scaledJob. +func (c *FakeScaledJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledJob, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(scaledjobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ScaledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledJob), err +} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go new file mode 100644 index 00000000000..4c7b909cbf2 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go @@ -0,0 +1,141 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeScaledObjects implements ScaledObjectInterface +type FakeScaledObjects struct { + Fake *FakeKedaV1alpha1 + ns string +} + +var scaledobjectsResource = schema.GroupVersionResource{Group: "keda.sh", Version: "v1alpha1", Resource: "scaledobjects"} + +var scaledobjectsKind = schema.GroupVersionKind{Group: "keda.sh", Version: "v1alpha1", Kind: "ScaledObject"} + +// Get takes name of the scaledObject, and returns the corresponding scaledObject object, and an error if there is any. +func (c *FakeScaledObjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ScaledObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(scaledobjectsResource, c.ns, name), &v1alpha1.ScaledObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledObject), err +} + +// List takes label and field selectors, and returns the list of ScaledObjects that match those selectors. +func (c *FakeScaledObjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ScaledObjectList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(scaledobjectsResource, scaledobjectsKind, c.ns, opts), &v1alpha1.ScaledObjectList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ScaledObjectList{ListMeta: obj.(*v1alpha1.ScaledObjectList).ListMeta} + for _, item := range obj.(*v1alpha1.ScaledObjectList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested scaledObjects. +func (c *FakeScaledObjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(scaledobjectsResource, c.ns, opts)) + +} + +// Create takes the representation of a scaledObject and creates it. Returns the server's representation of the scaledObject, and an error, if there is any. +func (c *FakeScaledObjects) Create(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.CreateOptions) (result *v1alpha1.ScaledObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(scaledobjectsResource, c.ns, scaledObject), &v1alpha1.ScaledObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledObject), err +} + +// Update takes the representation of a scaledObject and updates it. Returns the server's representation of the scaledObject, and an error, if there is any. +func (c *FakeScaledObjects) Update(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (result *v1alpha1.ScaledObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(scaledobjectsResource, c.ns, scaledObject), &v1alpha1.ScaledObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledObject), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeScaledObjects) UpdateStatus(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (*v1alpha1.ScaledObject, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(scaledobjectsResource, "status", c.ns, scaledObject), &v1alpha1.ScaledObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledObject), err +} + +// Delete takes name of the scaledObject and deletes it. Returns an error if one occurs. +func (c *FakeScaledObjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(scaledobjectsResource, c.ns, name), &v1alpha1.ScaledObject{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeScaledObjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(scaledobjectsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ScaledObjectList{}) + return err +} + +// Patch applies the patch and returns the patched scaledObject. +func (c *FakeScaledObjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledObject, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(scaledobjectsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ScaledObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ScaledObject), err +} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_triggerauthentication.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_triggerauthentication.go new file mode 100644 index 00000000000..47ce3fa4e06 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/fake/fake_triggerauthentication.go @@ -0,0 +1,129 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeTriggerAuthentications implements TriggerAuthenticationInterface +type FakeTriggerAuthentications struct { + Fake *FakeKedaV1alpha1 + ns string +} + +var triggerauthenticationsResource = schema.GroupVersionResource{Group: "keda.sh", Version: "v1alpha1", Resource: "triggerauthentications"} + +var triggerauthenticationsKind = schema.GroupVersionKind{Group: "keda.sh", Version: "v1alpha1", Kind: "TriggerAuthentication"} + +// Get takes name of the triggerAuthentication, and returns the corresponding triggerAuthentication object, and an error if there is any. +func (c *FakeTriggerAuthentications) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.TriggerAuthentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(triggerauthenticationsResource, c.ns, name), &v1alpha1.TriggerAuthentication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.TriggerAuthentication), err +} + +// List takes label and field selectors, and returns the list of TriggerAuthentications that match those selectors. +func (c *FakeTriggerAuthentications) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TriggerAuthenticationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(triggerauthenticationsResource, triggerauthenticationsKind, c.ns, opts), &v1alpha1.TriggerAuthenticationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.TriggerAuthenticationList{ListMeta: obj.(*v1alpha1.TriggerAuthenticationList).ListMeta} + for _, item := range obj.(*v1alpha1.TriggerAuthenticationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested triggerAuthentications. +func (c *FakeTriggerAuthentications) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(triggerauthenticationsResource, c.ns, opts)) + +} + +// Create takes the representation of a triggerAuthentication and creates it. Returns the server's representation of the triggerAuthentication, and an error, if there is any. +func (c *FakeTriggerAuthentications) Create(ctx context.Context, triggerAuthentication *v1alpha1.TriggerAuthentication, opts v1.CreateOptions) (result *v1alpha1.TriggerAuthentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(triggerauthenticationsResource, c.ns, triggerAuthentication), &v1alpha1.TriggerAuthentication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.TriggerAuthentication), err +} + +// Update takes the representation of a triggerAuthentication and updates it. Returns the server's representation of the triggerAuthentication, and an error, if there is any. +func (c *FakeTriggerAuthentications) Update(ctx context.Context, triggerAuthentication *v1alpha1.TriggerAuthentication, opts v1.UpdateOptions) (result *v1alpha1.TriggerAuthentication, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(triggerauthenticationsResource, c.ns, triggerAuthentication), &v1alpha1.TriggerAuthentication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.TriggerAuthentication), err +} + +// Delete takes name of the triggerAuthentication and deletes it. Returns an error if one occurs. +func (c *FakeTriggerAuthentications) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(triggerauthenticationsResource, c.ns, name), &v1alpha1.TriggerAuthentication{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeTriggerAuthentications) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(triggerauthenticationsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.TriggerAuthenticationList{}) + return err +} + +// Patch applies the patch and returns the patched triggerAuthentication. +func (c *FakeTriggerAuthentications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TriggerAuthentication, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(triggerauthenticationsResource, c.ns, name, pt, data, subresources...), &v1alpha1.TriggerAuthentication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.TriggerAuthentication), err +} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/generated_expansion.go new file mode 100644 index 00000000000..849f7b4aa89 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/generated_expansion.go @@ -0,0 +1,24 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ScaledJobExpansion interface{} + +type ScaledObjectExpansion interface{} + +type TriggerAuthenticationExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/keda_client.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/keda_client.go new file mode 100644 index 00000000000..0eb6a76ab08 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/keda_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + "github.com/kedacore/keda/pkg/generated/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type KedaV1alpha1Interface interface { + RESTClient() rest.Interface + ScaledJobsGetter + ScaledObjectsGetter + TriggerAuthenticationsGetter +} + +// KedaV1alpha1Client is used to interact with features provided by the keda.sh group. +type KedaV1alpha1Client struct { + restClient rest.Interface +} + +func (c *KedaV1alpha1Client) ScaledJobs(namespace string) ScaledJobInterface { + return newScaledJobs(c, namespace) +} + +func (c *KedaV1alpha1Client) ScaledObjects(namespace string) ScaledObjectInterface { + return newScaledObjects(c, namespace) +} + +func (c *KedaV1alpha1Client) TriggerAuthentications(namespace string) TriggerAuthenticationInterface { + return newTriggerAuthentications(c, namespace) +} + +// NewForConfig creates a new KedaV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*KedaV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &KedaV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new KedaV1alpha1Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *KedaV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new KedaV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *KedaV1alpha1Client { + return &KedaV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *KedaV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledjob.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledjob.go new file mode 100644 index 00000000000..2cb3086a0b0 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledjob.go @@ -0,0 +1,194 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + scheme "github.com/kedacore/keda/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ScaledJobsGetter has a method to return a ScaledJobInterface. +// A group's client should implement this interface. +type ScaledJobsGetter interface { + ScaledJobs(namespace string) ScaledJobInterface +} + +// ScaledJobInterface has methods to work with ScaledJob resources. +type ScaledJobInterface interface { + Create(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.CreateOptions) (*v1alpha1.ScaledJob, error) + Update(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.UpdateOptions) (*v1alpha1.ScaledJob, error) + UpdateStatus(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.UpdateOptions) (*v1alpha1.ScaledJob, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ScaledJob, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ScaledJobList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledJob, err error) + ScaledJobExpansion +} + +// scaledJobs implements ScaledJobInterface +type scaledJobs struct { + client rest.Interface + ns string +} + +// newScaledJobs returns a ScaledJobs +func newScaledJobs(c *KedaV1alpha1Client, namespace string) *scaledJobs { + return &scaledJobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the scaledJob, and returns the corresponding scaledJob object, 
and an error if there is any. +func (c *scaledJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ScaledJob, err error) { + result = &v1alpha1.ScaledJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scaledjobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ScaledJobs that match those selectors. +func (c *scaledJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ScaledJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ScaledJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scaledjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested scaledJobs. +func (c *scaledJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("scaledjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a scaledJob and creates it. Returns the server's representation of the scaledJob, and an error, if there is any. +func (c *scaledJobs) Create(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.CreateOptions) (result *v1alpha1.ScaledJob, err error) { + result = &v1alpha1.ScaledJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("scaledjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scaledJob). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a scaledJob and updates it. 
Returns the server's representation of the scaledJob, and an error, if there is any. +func (c *scaledJobs) Update(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.UpdateOptions) (result *v1alpha1.ScaledJob, err error) { + result = &v1alpha1.ScaledJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scaledjobs"). + Name(scaledJob.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scaledJob). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *scaledJobs) UpdateStatus(ctx context.Context, scaledJob *v1alpha1.ScaledJob, opts v1.UpdateOptions) (result *v1alpha1.ScaledJob, err error) { + result = &v1alpha1.ScaledJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scaledjobs"). + Name(scaledJob.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scaledJob). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the scaledJob and deletes it. Returns an error if one occurs. +func (c *scaledJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("scaledjobs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *scaledJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("scaledjobs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched scaledJob. 
+func (c *scaledJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledJob, err error) { + result = &v1alpha1.ScaledJob{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("scaledjobs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledobject.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledobject.go new file mode 100644 index 00000000000..581f92defe1 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/scaledobject.go @@ -0,0 +1,194 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + scheme "github.com/kedacore/keda/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ScaledObjectsGetter has a method to return a ScaledObjectInterface. +// A group's client should implement this interface. 
+type ScaledObjectsGetter interface { + ScaledObjects(namespace string) ScaledObjectInterface +} + +// ScaledObjectInterface has methods to work with ScaledObject resources. +type ScaledObjectInterface interface { + Create(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.CreateOptions) (*v1alpha1.ScaledObject, error) + Update(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (*v1alpha1.ScaledObject, error) + UpdateStatus(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (*v1alpha1.ScaledObject, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ScaledObject, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ScaledObjectList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledObject, err error) + ScaledObjectExpansion +} + +// scaledObjects implements ScaledObjectInterface +type scaledObjects struct { + client rest.Interface + ns string +} + +// newScaledObjects returns a ScaledObjects +func newScaledObjects(c *KedaV1alpha1Client, namespace string) *scaledObjects { + return &scaledObjects{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the scaledObject, and returns the corresponding scaledObject object, and an error if there is any. +func (c *scaledObjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ScaledObject, err error) { + result = &v1alpha1.ScaledObject{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scaledobjects"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of ScaledObjects that match those selectors. +func (c *scaledObjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ScaledObjectList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ScaledObjectList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scaledobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested scaledObjects. +func (c *scaledObjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("scaledobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a scaledObject and creates it. Returns the server's representation of the scaledObject, and an error, if there is any. +func (c *scaledObjects) Create(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.CreateOptions) (result *v1alpha1.ScaledObject, err error) { + result = &v1alpha1.ScaledObject{} + err = c.client.Post(). + Namespace(c.ns). + Resource("scaledobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scaledObject). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a scaledObject and updates it. Returns the server's representation of the scaledObject, and an error, if there is any. 
+func (c *scaledObjects) Update(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (result *v1alpha1.ScaledObject, err error) { + result = &v1alpha1.ScaledObject{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scaledobjects"). + Name(scaledObject.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scaledObject). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *scaledObjects) UpdateStatus(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (result *v1alpha1.ScaledObject, err error) { + result = &v1alpha1.ScaledObject{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scaledobjects"). + Name(scaledObject.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scaledObject). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the scaledObject and deletes it. Returns an error if one occurs. +func (c *scaledObjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("scaledobjects"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *scaledObjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("scaledobjects"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched scaledObject. 
+func (c *scaledObjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledObject, err error) { + result = &v1alpha1.ScaledObject{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("scaledobjects"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/keda/v1alpha1/triggerauthentication.go b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/triggerauthentication.go new file mode 100644 index 00000000000..58be6d943ed --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/keda/v1alpha1/triggerauthentication.go @@ -0,0 +1,177 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + scheme "github.com/kedacore/keda/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TriggerAuthenticationsGetter has a method to return a TriggerAuthenticationInterface. +// A group's client should implement this interface. 
+type TriggerAuthenticationsGetter interface { + TriggerAuthentications(namespace string) TriggerAuthenticationInterface +} + +// TriggerAuthenticationInterface has methods to work with TriggerAuthentication resources. +type TriggerAuthenticationInterface interface { + Create(ctx context.Context, triggerAuthentication *v1alpha1.TriggerAuthentication, opts v1.CreateOptions) (*v1alpha1.TriggerAuthentication, error) + Update(ctx context.Context, triggerAuthentication *v1alpha1.TriggerAuthentication, opts v1.UpdateOptions) (*v1alpha1.TriggerAuthentication, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.TriggerAuthentication, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TriggerAuthenticationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TriggerAuthentication, err error) + TriggerAuthenticationExpansion +} + +// triggerAuthentications implements TriggerAuthenticationInterface +type triggerAuthentications struct { + client rest.Interface + ns string +} + +// newTriggerAuthentications returns a TriggerAuthentications +func newTriggerAuthentications(c *KedaV1alpha1Client, namespace string) *triggerAuthentications { + return &triggerAuthentications{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the triggerAuthentication, and returns the corresponding triggerAuthentication object, and an error if there is any. +func (c *triggerAuthentications) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.TriggerAuthentication, err error) { + result = &v1alpha1.TriggerAuthentication{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("triggerauthentications"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of TriggerAuthentications that match those selectors. +func (c *triggerAuthentications) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TriggerAuthenticationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.TriggerAuthenticationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("triggerauthentications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested triggerAuthentications. +func (c *triggerAuthentications) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("triggerauthentications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a triggerAuthentication and creates it. Returns the server's representation of the triggerAuthentication, and an error, if there is any. +func (c *triggerAuthentications) Create(ctx context.Context, triggerAuthentication *v1alpha1.TriggerAuthentication, opts v1.CreateOptions) (result *v1alpha1.TriggerAuthentication, err error) { + result = &v1alpha1.TriggerAuthentication{} + err = c.client.Post(). + Namespace(c.ns). + Resource("triggerauthentications"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(triggerAuthentication). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a triggerAuthentication and updates it. 
Returns the server's representation of the triggerAuthentication, and an error, if there is any. +func (c *triggerAuthentications) Update(ctx context.Context, triggerAuthentication *v1alpha1.TriggerAuthentication, opts v1.UpdateOptions) (result *v1alpha1.TriggerAuthentication, err error) { + result = &v1alpha1.TriggerAuthentication{} + err = c.client.Put(). + Namespace(c.ns). + Resource("triggerauthentications"). + Name(triggerAuthentication.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(triggerAuthentication). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the triggerAuthentication and deletes it. Returns an error if one occurs. +func (c *triggerAuthentications) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("triggerauthentications"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *triggerAuthentications) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("triggerauthentications"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched triggerAuthentication. +func (c *triggerAuthentications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TriggerAuthentication, err error) { + result = &v1alpha1.TriggerAuthentication{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("triggerauthentications"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go new file mode 100644 index 00000000000..4bbd90c5c7a --- /dev/null +++ b/pkg/generated/informers/externalversions/factory.go @@ -0,0 +1,179 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/kedacore/keda/pkg/generated/clientset/versioned" + internalinterfaces "github.com/kedacore/keda/pkg/generated/informers/externalversions/internalinterfaces" + keda "github.com/kedacore/keda/pkg/generated/informers/externalversions/keda" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. 
+ // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Keda() keda.Interface +} + +func (f *sharedInformerFactory) Keda() keda.Interface { + return keda.New(f, f.namespace, f.tweakListOptions) +} diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go new file mode 100644 index 00000000000..197b9986d7e --- /dev/null +++ b/pkg/generated/informers/externalversions/generic.go @@ -0,0 +1,65 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=keda.sh, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("scaledjobs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Keda().V1alpha1().ScaledJobs().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("scaledobjects"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Keda().V1alpha1().ScaledObjects().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("triggerauthentications"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Keda().V1alpha1().TriggerAuthentications().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 00000000000..ec46744a221 --- /dev/null +++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,39 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/kedacore/keda/pkg/generated/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/pkg/generated/informers/externalversions/keda/interface.go b/pkg/generated/informers/externalversions/keda/interface.go new file mode 100644 index 00000000000..bd12d16bb5c --- /dev/null +++ b/pkg/generated/informers/externalversions/keda/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package keda + +import ( + internalinterfaces "github.com/kedacore/keda/pkg/generated/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kedacore/keda/pkg/generated/informers/externalversions/keda/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/interface.go b/pkg/generated/informers/externalversions/keda/v1alpha1/interface.go new file mode 100644 index 00000000000..fb90dbd9940 --- /dev/null +++ b/pkg/generated/informers/externalversions/keda/v1alpha1/interface.go @@ -0,0 +1,58 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + internalinterfaces "github.com/kedacore/keda/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ScaledJobs returns a ScaledJobInformer. + ScaledJobs() ScaledJobInformer + // ScaledObjects returns a ScaledObjectInformer. + ScaledObjects() ScaledObjectInformer + // TriggerAuthentications returns a TriggerAuthenticationInformer. + TriggerAuthentications() TriggerAuthenticationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ScaledJobs returns a ScaledJobInformer. +func (v *version) ScaledJobs() ScaledJobInformer { + return &scaledJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ScaledObjects returns a ScaledObjectInformer. +func (v *version) ScaledObjects() ScaledObjectInformer { + return &scaledObjectInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// TriggerAuthentications returns a TriggerAuthenticationInformer. 
+func (v *version) TriggerAuthentications() TriggerAuthenticationInformer { + return &triggerAuthenticationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/scaledjob.go b/pkg/generated/informers/externalversions/keda/v1alpha1/scaledjob.go new file mode 100644 index 00000000000..91b7bd59fd1 --- /dev/null +++ b/pkg/generated/informers/externalversions/keda/v1alpha1/scaledjob.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + versioned "github.com/kedacore/keda/pkg/generated/clientset/versioned" + internalinterfaces "github.com/kedacore/keda/pkg/generated/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kedacore/keda/pkg/generated/listers/keda/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ScaledJobInformer provides access to a shared informer and lister for +// ScaledJobs. 
+type ScaledJobInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ScaledJobLister +} + +type scaledJobInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewScaledJobInformer constructs a new informer for ScaledJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewScaledJobInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredScaledJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredScaledJobInformer constructs a new informer for ScaledJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredScaledJobInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KedaV1alpha1().ScaledJobs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KedaV1alpha1().ScaledJobs(namespace).Watch(context.TODO(), options) + }, + }, + &kedav1alpha1.ScaledJob{}, + resyncPeriod, + indexers, + ) +} + +func (f *scaledJobInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredScaledJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *scaledJobInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kedav1alpha1.ScaledJob{}, f.defaultInformer) +} + +func (f *scaledJobInformer) Lister() v1alpha1.ScaledJobLister { + return v1alpha1.NewScaledJobLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/scaledobject.go b/pkg/generated/informers/externalversions/keda/v1alpha1/scaledobject.go new file mode 100644 index 00000000000..d6dcc3e0819 --- /dev/null +++ b/pkg/generated/informers/externalversions/keda/v1alpha1/scaledobject.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + versioned "github.com/kedacore/keda/pkg/generated/clientset/versioned" + internalinterfaces "github.com/kedacore/keda/pkg/generated/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kedacore/keda/pkg/generated/listers/keda/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ScaledObjectInformer provides access to a shared informer and lister for +// ScaledObjects. +type ScaledObjectInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ScaledObjectLister +} + +type scaledObjectInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewScaledObjectInformer constructs a new informer for ScaledObject type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewScaledObjectInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredScaledObjectInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredScaledObjectInformer constructs a new informer for ScaledObject type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredScaledObjectInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KedaV1alpha1().ScaledObjects(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KedaV1alpha1().ScaledObjects(namespace).Watch(context.TODO(), options) + }, + }, + &kedav1alpha1.ScaledObject{}, + resyncPeriod, + indexers, + ) +} + +func (f *scaledObjectInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredScaledObjectInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *scaledObjectInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kedav1alpha1.ScaledObject{}, f.defaultInformer) +} + +func (f *scaledObjectInformer) Lister() v1alpha1.ScaledObjectLister { + return v1alpha1.NewScaledObjectLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/keda/v1alpha1/triggerauthentication.go b/pkg/generated/informers/externalversions/keda/v1alpha1/triggerauthentication.go new file mode 100644 index 00000000000..2f2cf26fe8f --- /dev/null +++ b/pkg/generated/informers/externalversions/keda/v1alpha1/triggerauthentication.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + versioned "github.com/kedacore/keda/pkg/generated/clientset/versioned" + internalinterfaces "github.com/kedacore/keda/pkg/generated/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kedacore/keda/pkg/generated/listers/keda/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// TriggerAuthenticationInformer provides access to a shared informer and lister for +// TriggerAuthentications. +type TriggerAuthenticationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.TriggerAuthenticationLister +} + +type triggerAuthenticationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewTriggerAuthenticationInformer constructs a new informer for TriggerAuthentication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewTriggerAuthenticationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredTriggerAuthenticationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredTriggerAuthenticationInformer constructs a new informer for TriggerAuthentication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredTriggerAuthenticationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KedaV1alpha1().TriggerAuthentications(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KedaV1alpha1().TriggerAuthentications(namespace).Watch(context.TODO(), options) + }, + }, + &kedav1alpha1.TriggerAuthentication{}, + resyncPeriod, + indexers, + ) +} + +func (f *triggerAuthenticationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredTriggerAuthenticationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *triggerAuthenticationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kedav1alpha1.TriggerAuthentication{}, f.defaultInformer) +} + +func (f *triggerAuthenticationInformer) Lister() v1alpha1.TriggerAuthenticationLister { + return 
v1alpha1.NewTriggerAuthenticationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/listers/keda/v1alpha1/expansion_generated.go b/pkg/generated/listers/keda/v1alpha1/expansion_generated.go new file mode 100644 index 00000000000..cc12eba668b --- /dev/null +++ b/pkg/generated/listers/keda/v1alpha1/expansion_generated.go @@ -0,0 +1,42 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// ScaledJobListerExpansion allows custom methods to be added to +// ScaledJobLister. +type ScaledJobListerExpansion interface{} + +// ScaledJobNamespaceListerExpansion allows custom methods to be added to +// ScaledJobNamespaceLister. +type ScaledJobNamespaceListerExpansion interface{} + +// ScaledObjectListerExpansion allows custom methods to be added to +// ScaledObjectLister. +type ScaledObjectListerExpansion interface{} + +// ScaledObjectNamespaceListerExpansion allows custom methods to be added to +// ScaledObjectNamespaceLister. +type ScaledObjectNamespaceListerExpansion interface{} + +// TriggerAuthenticationListerExpansion allows custom methods to be added to +// TriggerAuthenticationLister. +type TriggerAuthenticationListerExpansion interface{} + +// TriggerAuthenticationNamespaceListerExpansion allows custom methods to be added to +// TriggerAuthenticationNamespaceLister. 
+type TriggerAuthenticationNamespaceListerExpansion interface{} diff --git a/pkg/generated/listers/keda/v1alpha1/scaledjob.go b/pkg/generated/listers/keda/v1alpha1/scaledjob.go new file mode 100644 index 00000000000..25708333b49 --- /dev/null +++ b/pkg/generated/listers/keda/v1alpha1/scaledjob.go @@ -0,0 +1,93 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaledJobLister helps list ScaledJobs. +type ScaledJobLister interface { + // List lists all ScaledJobs in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.ScaledJob, err error) + // ScaledJobs returns an object that can list and get ScaledJobs. + ScaledJobs(namespace string) ScaledJobNamespaceLister + ScaledJobListerExpansion +} + +// scaledJobLister implements the ScaledJobLister interface. +type scaledJobLister struct { + indexer cache.Indexer +} + +// NewScaledJobLister returns a new ScaledJobLister. +func NewScaledJobLister(indexer cache.Indexer) ScaledJobLister { + return &scaledJobLister{indexer: indexer} +} + +// List lists all ScaledJobs in the indexer. 
+func (s *scaledJobLister) List(selector labels.Selector) (ret []*v1alpha1.ScaledJob, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ScaledJob)) + }) + return ret, err +} + +// ScaledJobs returns an object that can list and get ScaledJobs. +func (s *scaledJobLister) ScaledJobs(namespace string) ScaledJobNamespaceLister { + return scaledJobNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaledJobNamespaceLister helps list and get ScaledJobs. +type ScaledJobNamespaceLister interface { + // List lists all ScaledJobs in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.ScaledJob, err error) + // Get retrieves the ScaledJob from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.ScaledJob, error) + ScaledJobNamespaceListerExpansion +} + +// scaledJobNamespaceLister implements the ScaledJobNamespaceLister +// interface. +type scaledJobNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ScaledJobs in the indexer for a given namespace. +func (s scaledJobNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ScaledJob, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ScaledJob)) + }) + return ret, err +} + +// Get retrieves the ScaledJob from the indexer for a given namespace and name. 
+func (s scaledJobNamespaceLister) Get(name string) (*v1alpha1.ScaledJob, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("scaledjob"), name) + } + return obj.(*v1alpha1.ScaledJob), nil +} diff --git a/pkg/generated/listers/keda/v1alpha1/scaledobject.go b/pkg/generated/listers/keda/v1alpha1/scaledobject.go new file mode 100644 index 00000000000..290a5248229 --- /dev/null +++ b/pkg/generated/listers/keda/v1alpha1/scaledobject.go @@ -0,0 +1,93 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaledObjectLister helps list ScaledObjects. +type ScaledObjectLister interface { + // List lists all ScaledObjects in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.ScaledObject, err error) + // ScaledObjects returns an object that can list and get ScaledObjects. + ScaledObjects(namespace string) ScaledObjectNamespaceLister + ScaledObjectListerExpansion +} + +// scaledObjectLister implements the ScaledObjectLister interface. +type scaledObjectLister struct { + indexer cache.Indexer +} + +// NewScaledObjectLister returns a new ScaledObjectLister. 
+func NewScaledObjectLister(indexer cache.Indexer) ScaledObjectLister { + return &scaledObjectLister{indexer: indexer} +} + +// List lists all ScaledObjects in the indexer. +func (s *scaledObjectLister) List(selector labels.Selector) (ret []*v1alpha1.ScaledObject, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ScaledObject)) + }) + return ret, err +} + +// ScaledObjects returns an object that can list and get ScaledObjects. +func (s *scaledObjectLister) ScaledObjects(namespace string) ScaledObjectNamespaceLister { + return scaledObjectNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaledObjectNamespaceLister helps list and get ScaledObjects. +type ScaledObjectNamespaceLister interface { + // List lists all ScaledObjects in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.ScaledObject, err error) + // Get retrieves the ScaledObject from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.ScaledObject, error) + ScaledObjectNamespaceListerExpansion +} + +// scaledObjectNamespaceLister implements the ScaledObjectNamespaceLister +// interface. +type scaledObjectNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ScaledObjects in the indexer for a given namespace. +func (s scaledObjectNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ScaledObject, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ScaledObject)) + }) + return ret, err +} + +// Get retrieves the ScaledObject from the indexer for a given namespace and name. 
+func (s scaledObjectNamespaceLister) Get(name string) (*v1alpha1.ScaledObject, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("scaledobject"), name) + } + return obj.(*v1alpha1.ScaledObject), nil +} diff --git a/pkg/generated/listers/keda/v1alpha1/triggerauthentication.go b/pkg/generated/listers/keda/v1alpha1/triggerauthentication.go new file mode 100644 index 00000000000..ebeb909bd1d --- /dev/null +++ b/pkg/generated/listers/keda/v1alpha1/triggerauthentication.go @@ -0,0 +1,93 @@ +/* +Copyright 2020 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// TriggerAuthenticationLister helps list TriggerAuthentications. +type TriggerAuthenticationLister interface { + // List lists all TriggerAuthentications in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.TriggerAuthentication, err error) + // TriggerAuthentications returns an object that can list and get TriggerAuthentications. 
+ TriggerAuthentications(namespace string) TriggerAuthenticationNamespaceLister + TriggerAuthenticationListerExpansion +} + +// triggerAuthenticationLister implements the TriggerAuthenticationLister interface. +type triggerAuthenticationLister struct { + indexer cache.Indexer +} + +// NewTriggerAuthenticationLister returns a new TriggerAuthenticationLister. +func NewTriggerAuthenticationLister(indexer cache.Indexer) TriggerAuthenticationLister { + return &triggerAuthenticationLister{indexer: indexer} +} + +// List lists all TriggerAuthentications in the indexer. +func (s *triggerAuthenticationLister) List(selector labels.Selector) (ret []*v1alpha1.TriggerAuthentication, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.TriggerAuthentication)) + }) + return ret, err +} + +// TriggerAuthentications returns an object that can list and get TriggerAuthentications. +func (s *triggerAuthenticationLister) TriggerAuthentications(namespace string) TriggerAuthenticationNamespaceLister { + return triggerAuthenticationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// TriggerAuthenticationNamespaceLister helps list and get TriggerAuthentications. +type TriggerAuthenticationNamespaceLister interface { + // List lists all TriggerAuthentications in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.TriggerAuthentication, err error) + // Get retrieves the TriggerAuthentication from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.TriggerAuthentication, error) + TriggerAuthenticationNamespaceListerExpansion +} + +// triggerAuthenticationNamespaceLister implements the TriggerAuthenticationNamespaceLister +// interface. +type triggerAuthenticationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all TriggerAuthentications in the indexer for a given namespace. 
+func (s triggerAuthenticationNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.TriggerAuthentication, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.TriggerAuthentication)) + }) + return ret, err +} + +// Get retrieves the TriggerAuthentication from the indexer for a given namespace and name. +func (s triggerAuthenticationNamespaceLister) Get(name string) (*v1alpha1.TriggerAuthentication, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("triggerauthentication"), name) + } + return obj.(*v1alpha1.TriggerAuthentication), nil +} diff --git a/pkg/handler/scale_deployments.go b/pkg/handler/scale_deployments.go deleted file mode 100644 index ddb83b4c6d7..00000000000 --- a/pkg/handler/scale_deployments.go +++ /dev/null @@ -1,156 +0,0 @@ -package handler - -import ( - "context" - "fmt" - "time" - - kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" -) - -func (h *ScaleHandler) scaleDeployment(deployment *appsv1.Deployment, scaledObject *kedav1alpha1.ScaledObject, isActive bool) { - - if *deployment.Spec.Replicas == 0 && isActive { - // current replica count is 0, but there is an active trigger. - // scale the deployment up - h.scaleFromZero(deployment, scaledObject) - } else if !isActive && - *deployment.Spec.Replicas > 0 && - (scaledObject.Spec.MinReplicaCount == nil || *scaledObject.Spec.MinReplicaCount == 0) { - // there are no active triggers, but the deployment has replicas. - // AND - // There is no minimum configured or minimum is set to ZERO. HPA will handles other scale down operations - - // Try to scale it down. 
- h.scaleDeploymentToZero(deployment, scaledObject) - } else if !isActive && - scaledObject.Spec.MinReplicaCount != nil && - *deployment.Spec.Replicas < *scaledObject.Spec.MinReplicaCount { - // there are no active triggers - // AND - // deployment replicas count is less than minimum replica count specified in ScaledObject - // Let's set deployment replicas count to correct value - *deployment.Spec.Replicas = *scaledObject.Spec.MinReplicaCount - - err := h.updateDeployment(deployment) - if err == nil { - h.logger.Info("Successfully set Deployment replicas count to ScaledObject minReplicaCount", "Deployment.Namespace", deployment.GetNamespace(), "Deployment.Name", deployment.GetName(), "Deployment.Replicas", *deployment.Spec.Replicas) - } - } else if isActive { - // triggers are active, but we didn't need to scale (replica count > 0) - // Update LastActiveTime to now. - now := metav1.Now() - scaledObject.Status.LastActiveTime = &now - h.updateScaledObjectStatus(scaledObject) - } else { - h.logger.V(1).Info("Deployment no change", "Deployment.Namespace", deployment.GetNamespace(), "Deployment.Name", deployment.GetName()) - } -} - -func (h *ScaleHandler) updateDeployment(deployment *appsv1.Deployment) error { - - err := h.client.Update(context.TODO(), deployment) - if err != nil { - h.logger.Error(err, "Error updating deployment", "Deployment.Namespace", deployment.GetNamespace(), "Deployment.Name", deployment.GetName()) - return err - } - return nil -} - -// A deployment will be scaled down to 0 only if it's passed its cooldown period -// or if LastActiveTime is nil -func (h *ScaleHandler) scaleDeploymentToZero(deployment *appsv1.Deployment, scaledObject *kedav1alpha1.ScaledObject) { - var cooldownPeriod time.Duration - - if scaledObject.Spec.CooldownPeriod != nil { - cooldownPeriod = time.Second * time.Duration(*scaledObject.Spec.CooldownPeriod) - } else { - cooldownPeriod = time.Second * time.Duration(defaultCooldownPeriod) - } - - // LastActiveTime can be nil if 
the deployment was scaled outside of Keda. - // In this case we will ignore the cooldown period and scale it down - if scaledObject.Status.LastActiveTime == nil || - scaledObject.Status.LastActiveTime.Add(cooldownPeriod).Before(time.Now()) { - // or last time a trigger was active was > cooldown period, so scale down. - *deployment.Spec.Replicas = 0 - err := h.updateDeployment(deployment) - if err == nil { - h.logger.Info("Successfully scaled deployment to 0 replicas", "Deployment.Namespace", deployment.GetNamespace(), "Deployment.Name", deployment.GetName()) - } - } else { - h.logger.V(1).Info("scaledObject cooling down", - "LastActiveTime", - scaledObject.Status.LastActiveTime, - "CoolDownPeriod", - cooldownPeriod) - } -} - -func (h *ScaleHandler) scaleFromZero(deployment *appsv1.Deployment, scaledObject *kedav1alpha1.ScaledObject) { - currentReplicas := *deployment.Spec.Replicas - if scaledObject.Spec.MinReplicaCount != nil && *scaledObject.Spec.MinReplicaCount > 0 { - deployment.Spec.Replicas = scaledObject.Spec.MinReplicaCount - } else { - *deployment.Spec.Replicas = 1 - } - - err := h.updateDeployment(deployment) - - if err == nil { - h.logger.Info("Successfully updated deployment", "Deployment.Namespace", deployment.GetNamespace(), "Deployment.Name", deployment.GetName(), - "Original Replicas Count", - currentReplicas, - "New Replicas Count", - *deployment.Spec.Replicas) - - // Scale was successful. 
Update lastScaleTime and lastActiveTime on the scaledObject - now := metav1.Now() - scaledObject.Status.LastActiveTime = &now - h.updateScaledObjectStatus(scaledObject) - } -} - -func (h *ScaleHandler) resolveDeploymentEnv(deployment *appsv1.Deployment, containerName string) (map[string]string, error) { - deploymentKey, err := cache.MetaNamespaceKeyFunc(deployment) - if err != nil { - return nil, err - } - - if len(deployment.Spec.Template.Spec.Containers) < 1 { - return nil, fmt.Errorf("Deployment (%s) doesn't have containers", deploymentKey) - } - - var container corev1.Container - - if containerName != "" { - for _, c := range deployment.Spec.Template.Spec.Containers { - if c.Name == containerName { - container = c - break - } - } - - if &container == nil { - return nil, fmt.Errorf("Couldn't find container with name %s on deployment %s", containerName, deployment.GetName()) - } - } else { - container = deployment.Spec.Template.Spec.Containers[0] - } - - return h.resolveEnv(&container, deployment.GetNamespace()) -} - -func (h *ScaleHandler) parseDeploymentAuthRef(triggerAuthRef *kedav1alpha1.ScaledObjectAuthRef, scaledObject *kedav1alpha1.ScaledObject, deployment *appsv1.Deployment) (map[string]string, string) { - return h.parseAuthRef(triggerAuthRef, scaledObject, func(name, containerName string) string { - env, err := h.resolveDeploymentEnv(deployment, containerName) - if err != nil { - return "" - } - return env[name] - }) -} diff --git a/pkg/handler/scale_handler.go b/pkg/handler/scale_handler.go deleted file mode 100644 index d889426bcb3..00000000000 --- a/pkg/handler/scale_handler.go +++ /dev/null @@ -1,398 +0,0 @@ -package handler - -import ( - "context" - "fmt" - - kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" - "github.com/kedacore/keda/pkg/scalers" - - "github.com/go-logr/logr" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" 
- "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" -) - -// ScaleHandler encapsulates the logic of calling the right scalers for -// each ScaledObject and making the final scale decision and operation -type ScaleHandler struct { - client client.Client - logger logr.Logger - reconcilerScheme *runtime.Scheme -} - -const ( - // Default polling interval for a ScaledObject triggers if no pollingInterval is defined. - defaultPollingInterval = 30 - // Default cooldown period for a deployment if no cooldownPeriod is defined on the scaledObject - defaultCooldownPeriod = 5 * 60 // 5 minutes -) - -// NewScaleHandler creates a ScaleHandler object -func NewScaleHandler(client client.Client, reconcilerScheme *runtime.Scheme) *ScaleHandler { - handler := &ScaleHandler{ - client: client, - logger: logf.Log.WithName("scalehandler"), - reconcilerScheme: reconcilerScheme, - } - return handler -} - -func (h *ScaleHandler) updateScaledObjectStatus(scaledObject *kedav1alpha1.ScaledObject) error { - err := h.client.Status().Update(context.TODO(), scaledObject) - if err != nil { - if errors.IsConflict(err) { - // ScaledObject's metadata that are not necessary to restart the ScaleLoop were updated (eg. 
labels) - // we should try to fetch the scaledObject again and process the update once again - h.logger.V(1).Info("Trying to fetch updated version of ScaledObject to properly update it's Status") - updatedScaledObject := &kedav1alpha1.ScaledObject{} - err2 := h.client.Get(context.TODO(), types.NamespacedName{Name: scaledObject.Name, Namespace: scaledObject.Namespace}, updatedScaledObject) - if err2 != nil { - h.logger.Error(err2, "Error getting updated version of ScaledObject before updating it's Status") - } else { - scaledObject = updatedScaledObject - if h.client.Status().Update(context.TODO(), scaledObject) == nil { - h.logger.V(1).Info("ScaledObject's Status was properly updated on re-fetched ScaledObject") - return nil - } - } - } - // we got another type of error - h.logger.Error(err, "Error updating scaledObject status") - return err - } - h.logger.V(1).Info("ScaledObject's Status was properly updated") - return nil -} - -func (h *ScaleHandler) resolveEnv(container *corev1.Container, namespace string) (map[string]string, error) { - resolved := make(map[string]string) - - if container.EnvFrom != nil { - for _, source := range container.EnvFrom { - if source.ConfigMapRef != nil { - if configMap, err := h.resolveConfigMap(source.ConfigMapRef, namespace); err == nil { - for k, v := range configMap { - resolved[k] = v - } - } else if source.ConfigMapRef.Optional != nil && *source.ConfigMapRef.Optional { - // ignore error when ConfigMap is marked as optional - continue - } else { - return nil, fmt.Errorf("error reading config ref %s on namespace %s/: %s", source.ConfigMapRef, namespace, err) - } - } else if source.SecretRef != nil { - if secretsMap, err := h.resolveSecretMap(source.SecretRef, namespace); err == nil { - for k, v := range secretsMap { - resolved[k] = v - } - } else if source.SecretRef.Optional != nil && *source.SecretRef.Optional { - // ignore error when Secret is marked as optional - continue - } else { - return nil, fmt.Errorf("error reading 
secret ref %s on namespace %s: %s", source.SecretRef, namespace, err) - } - } - } - - } - - if container.Env != nil { - for _, envVar := range container.Env { - var value string - var err error - - // env is either a name/value pair or an EnvVarSource - if envVar.Value != "" { - value = envVar.Value - } else if envVar.ValueFrom != nil { - // env is an EnvVarSource, that can be on of the 4 below - if envVar.ValueFrom.SecretKeyRef != nil { - // env is a secret selector - value, err = h.resolveSecretValue(envVar.ValueFrom.SecretKeyRef, envVar.ValueFrom.SecretKeyRef.Key, namespace) - if err != nil { - return nil, fmt.Errorf("error resolving secret name %s for env %s in namespace %s", - envVar.ValueFrom.SecretKeyRef, - envVar.Name, - namespace) - } - } else if envVar.ValueFrom.ConfigMapKeyRef != nil { - // env is a configMap selector - value, err = h.resolveConfigValue(envVar.ValueFrom.ConfigMapKeyRef, envVar.ValueFrom.ConfigMapKeyRef.Key, namespace) - if err != nil { - return nil, fmt.Errorf("error resolving config %s for env %s in namespace %s", - envVar.ValueFrom.ConfigMapKeyRef, - envVar.Name, - namespace) - } - } else { - h.logger.V(1).Info("cannot resolve env %s to a value. 
fieldRef and resourceFieldRef env are skipped", envVar.Name) - continue - } - - } - resolved[envVar.Name] = value - } - - } - - return resolved, nil -} - -func (h *ScaleHandler) resolveConfigMap(configMapRef *corev1.ConfigMapEnvSource, namespace string) (map[string]string, error) { - configMap := &corev1.ConfigMap{} - err := h.client.Get(context.TODO(), types.NamespacedName{Name: configMapRef.Name, Namespace: namespace}, configMap) - if err != nil { - return nil, err - } - return configMap.Data, nil -} - -func (h *ScaleHandler) resolveSecretMap(secretMapRef *corev1.SecretEnvSource, namespace string) (map[string]string, error) { - secret := &corev1.Secret{} - err := h.client.Get(context.TODO(), types.NamespacedName{Name: secretMapRef.Name, Namespace: namespace}, secret) - if err != nil { - return nil, err - } - - secretsStr := make(map[string]string) - for k, v := range secret.Data { - secretsStr[k] = string(v) - } - return secretsStr, nil -} - -func (h *ScaleHandler) resolveSecretValue(secretKeyRef *corev1.SecretKeySelector, keyName, namespace string) (string, error) { - secret := &corev1.Secret{} - err := h.client.Get(context.TODO(), types.NamespacedName{Name: secretKeyRef.Name, Namespace: namespace}, secret) - if err != nil { - return "", err - } - return string(secret.Data[keyName]), nil - -} - -func (h *ScaleHandler) resolveConfigValue(configKeyRef *corev1.ConfigMapKeySelector, keyName, namespace string) (string, error) { - configMap := &corev1.ConfigMap{} - err := h.client.Get(context.TODO(), types.NamespacedName{Name: configKeyRef.Name, Namespace: namespace}, configMap) - if err != nil { - return "", err - } - return string(configMap.Data[keyName]), nil -} - -func closeScalers(scalers []scalers.Scaler) { - for _, scaler := range scalers { - defer scaler.Close() - } -} - -// GetDeploymentScalers returns list of Scalers and Deployment for the specified ScaledObject -func (h *ScaleHandler) GetDeploymentScalers(scaledObject *kedav1alpha1.ScaledObject) 
([]scalers.Scaler, *appsv1.Deployment, error) { - scalersRes := []scalers.Scaler{} - - deploymentName := scaledObject.Spec.ScaleTargetRef.DeploymentName - if deploymentName == "" { - return scalersRes, nil, fmt.Errorf("notified about ScaledObject with missing deployment name: %s", scaledObject.GetName()) - } - - deployment := &appsv1.Deployment{} - err := h.client.Get(context.TODO(), types.NamespacedName{Name: deploymentName, Namespace: scaledObject.GetNamespace()}, deployment) - if err != nil { - return scalersRes, nil, fmt.Errorf("error getting deployment: %s", err) - } - - resolvedEnv, err := h.resolveDeploymentEnv(deployment, scaledObject.Spec.ScaleTargetRef.ContainerName) - if err != nil { - return scalersRes, nil, fmt.Errorf("error resolving secrets for deployment: %s", err) - } - - for i, trigger := range scaledObject.Spec.Triggers { - authParams, podIdentity := h.parseDeploymentAuthRef(trigger.AuthenticationRef, scaledObject, deployment) - - if podIdentity == kedav1alpha1.PodIdentityProviderAwsEKS { - serviceAccountName := deployment.Spec.Template.Spec.ServiceAccountName - serviceAccount := &v1.ServiceAccount{} - err = h.client.Get(context.TODO(), types.NamespacedName{Name: serviceAccountName, Namespace: scaledObject.GetNamespace()}, serviceAccount) - if err != nil { - closeScalers(scalersRes) - return []scalers.Scaler{}, nil, fmt.Errorf("error getting service account: %s", err) - } - authParams["awsRoleArn"] = serviceAccount.Annotations[kedav1alpha1.PodIdentityAnnotationEKS] - } else if podIdentity == kedav1alpha1.PodIdentityProviderAwsKiam { - authParams["awsRoleArn"] = deployment.Spec.Template.ObjectMeta.Annotations[kedav1alpha1.PodIdentityAnnotationKiam] - } - - scaler, err := h.getScaler(scaledObject.Name, scaledObject.Namespace, trigger.Type, resolvedEnv, trigger.Metadata, authParams, podIdentity) - if err != nil { - closeScalers(scalersRes) - return []scalers.Scaler{}, nil, fmt.Errorf("error getting scaler for trigger #%d: %s", i, err) - } - - 
scalersRes = append(scalersRes, scaler) - } - - return scalersRes, deployment, nil -} - -func (h *ScaleHandler) getJobScalers(scaledObject *kedav1alpha1.ScaledObject) ([]scalers.Scaler, error) { - scalersRes := []scalers.Scaler{} - - resolvedEnv, err := h.resolveJobEnv(scaledObject) - if err != nil { - return scalersRes, fmt.Errorf("error resolving secrets for job: %s", err) - } - - for i, trigger := range scaledObject.Spec.Triggers { - authParams, podIdentity := h.parseJobAuthRef(trigger.AuthenticationRef, scaledObject) - scaler, err := h.getScaler(scaledObject.Name, scaledObject.Namespace, trigger.Type, resolvedEnv, trigger.Metadata, authParams, podIdentity) - if err != nil { - closeScalers(scalersRes) - return []scalers.Scaler{}, fmt.Errorf("error getting scaler for trigger #%d: %s", i, err) - } - - scalersRes = append(scalersRes, scaler) - } - - return scalersRes, nil -} - -func (h *ScaleHandler) resolveAuthSecret(name, namespace, key string) string { - if name == "" || namespace == "" || key == "" { - h.logger.Error(fmt.Errorf("Error trying to get secret"), "name, namespace and key are required", "Secret.Namespace", namespace, "Secret.Name", name, "key", key) - return "" - } - - secret := &corev1.Secret{} - err := h.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, secret) - if err != nil { - h.logger.Error(err, "Error trying to get secret from namespace", "Secret.Namespace", namespace, "Secret.Name", name) - return "" - } - result := secret.Data[key] - - if result == nil { - return "" - } - - return string(result) -} - -func (h *ScaleHandler) resolveVaultSecret(data map[string]interface{}, key string) string { - if v2Data, ok := data["data"].(map[string]interface{}); ok { - if value, ok := v2Data[key]; ok { - if s, ok := value.(string); ok { - return s - } - } else { - h.logger.Error(fmt.Errorf("key '%s' not found", key), "Error trying to get key from Vault secret") - return "" - } - } - - h.logger.Error(fmt.Errorf("unable to 
convert Vault Data value"), "Error trying to convert Data secret vaule") - return "" -} - -func (h *ScaleHandler) parseAuthRef(triggerAuthRef *kedav1alpha1.ScaledObjectAuthRef, scaledObject *kedav1alpha1.ScaledObject, resolveEnv func(string, string) string) (map[string]string, string) { - result := make(map[string]string) - podIdentity := "" - - if triggerAuthRef != nil && triggerAuthRef.Name != "" { - triggerAuth := &kedav1alpha1.TriggerAuthentication{} - err := h.client.Get(context.TODO(), types.NamespacedName{Name: triggerAuthRef.Name, Namespace: scaledObject.Namespace}, triggerAuth) - if err != nil { - h.logger.Error(err, "Error getting triggerAuth", "triggerAuthRef.Name", triggerAuthRef.Name) - } else { - podIdentity = string(triggerAuth.Spec.PodIdentity.Provider) - if triggerAuth.Spec.Env != nil { - for _, e := range triggerAuth.Spec.Env { - result[e.Parameter] = resolveEnv(e.Name, e.ContainerName) - } - } - if triggerAuth.Spec.SecretTargetRef != nil { - for _, e := range triggerAuth.Spec.SecretTargetRef { - result[e.Parameter] = h.resolveAuthSecret(e.Name, scaledObject.Namespace, e.Key) - } - } - if triggerAuth.Spec.HashiCorpVault.Secrets != nil { - vault := NewHashicorpVaultHandler(&triggerAuth.Spec.HashiCorpVault) - err := vault.Initialize(h.logger) - if err != nil { - h.logger.Error(err, "Error authenticate to Vault", "triggerAuthRef.Name", triggerAuthRef.Name) - } else { - for _, e := range triggerAuth.Spec.HashiCorpVault.Secrets { - secret, err := vault.Read(e.Path) - if err != nil { - h.logger.Error(err, "Error trying to read secret from Vault", "triggerAuthRef.Name", triggerAuthRef.Name, - "secret.path", e.Path) - continue - } - - result[e.Parameter] = h.resolveVaultSecret(secret.Data, e.Key) - } - - vault.Stop() - } - } - } - } - - return result, podIdentity -} - -func (h *ScaleHandler) getScaler(name, namespace, triggerType string, resolvedEnv, triggerMetadata, authParams map[string]string, podIdentity string) (scalers.Scaler, error) { - switch 
triggerType { - case "azure-queue": - return scalers.NewAzureQueueScaler(resolvedEnv, triggerMetadata, authParams, podIdentity) - case "azure-servicebus": - return scalers.NewAzureServiceBusScaler(resolvedEnv, triggerMetadata, authParams, podIdentity) - case "aws-sqs-queue": - return scalers.NewAwsSqsQueueScaler(resolvedEnv, triggerMetadata, authParams) - case "aws-cloudwatch": - return scalers.NewAwsCloudwatchScaler(resolvedEnv, triggerMetadata, authParams) - case "aws-kinesis-stream": - return scalers.NewAwsKinesisStreamScaler(resolvedEnv, triggerMetadata, authParams) - case "kafka": - return scalers.NewKafkaScaler(resolvedEnv, triggerMetadata, authParams) - case "rabbitmq": - return scalers.NewRabbitMQScaler(resolvedEnv, triggerMetadata, authParams) - case "azure-eventhub": - return scalers.NewAzureEventHubScaler(resolvedEnv, triggerMetadata) - case "prometheus": - return scalers.NewPrometheusScaler(resolvedEnv, triggerMetadata) - case "cron": - return scalers.NewCronScaler(resolvedEnv, triggerMetadata) - case "redis": - return scalers.NewRedisScaler(resolvedEnv, triggerMetadata, authParams) - case "gcp-pubsub": - return scalers.NewPubSubScaler(resolvedEnv, triggerMetadata) - case "external": - return scalers.NewExternalScaler(name, namespace, resolvedEnv, triggerMetadata) - case "liiklus": - return scalers.NewLiiklusScaler(resolvedEnv, triggerMetadata) - case "stan": - return scalers.NewStanScaler(resolvedEnv, triggerMetadata) - case "huawei-cloudeye": - return scalers.NewHuaweiCloudeyeScaler(triggerMetadata, authParams) - case "azure-blob": - return scalers.NewAzureBlobScaler(resolvedEnv, triggerMetadata, authParams, podIdentity) - case "postgresql": - return scalers.NewPostgreSQLScaler(resolvedEnv, triggerMetadata, authParams) - case "mysql": - return scalers.NewMySQLScaler(resolvedEnv, triggerMetadata, authParams) - case "azure-monitor": - return scalers.NewAzureMonitorScaler(resolvedEnv, triggerMetadata, authParams) - case "redis-streams": - return 
scalers.NewRedisStreamsScaler(resolvedEnv, triggerMetadata, authParams) - case "artemis-queue": - return scalers.NewArtemisQueueScaler(resolvedEnv, triggerMetadata, authParams) - default: - return nil, fmt.Errorf("no scaler found for type: %s", triggerType) - } -} diff --git a/pkg/handler/scale_jobs.go b/pkg/handler/scale_jobs.go deleted file mode 100644 index 9089f573fca..00000000000 --- a/pkg/handler/scale_jobs.go +++ /dev/null @@ -1,149 +0,0 @@ -package handler - -import ( - "context" - "fmt" - - kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" - version "github.com/kedacore/keda/version" - - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func (h *ScaleHandler) scaleJobs(scaledObject *kedav1alpha1.ScaledObject, isActive bool, scaleTo int64, maxScale int64) { - runningJobCount := h.getRunningJobCount(scaledObject, maxScale) - h.logger.Info("Scaling Jobs", "Number of running Jobs ", runningJobCount) - - var effectiveMaxScale int64 - effectiveMaxScale = maxScale - runningJobCount - if effectiveMaxScale < 0 { - effectiveMaxScale = 0 - } - - h.logger.Info("Scaling Jobs") - - if isActive { - h.logger.V(1).Info("At least one scaler is active") - now := metav1.Now() - scaledObject.Status.LastActiveTime = &now - h.updateScaledObjectStatus(scaledObject) - h.createJobs(scaledObject, scaleTo, effectiveMaxScale) - - } else { - h.logger.V(1).Info("No change in activity") - } - return -} - -func (h *ScaleHandler) createJobs(scaledObject *kedav1alpha1.ScaledObject, scaleTo int64, maxScale int64) { - scaledObject.Spec.JobTargetRef.Template.GenerateName = scaledObject.GetName() + "-" - if scaledObject.Spec.JobTargetRef.Template.Labels == nil { - scaledObject.Spec.JobTargetRef.Template.Labels = map[string]string{} - } - 
scaledObject.Spec.JobTargetRef.Template.Labels["scaledobject"] = scaledObject.GetName() - - h.logger.Info("Creating jobs", "Effective number of max jobs", maxScale) - - if scaleTo > maxScale { - scaleTo = maxScale - } - h.logger.Info("Creating jobs", "Number of jobs", scaleTo) - - for i := 0; i < int(scaleTo); i++ { - - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: scaledObject.GetName() + "-", - Namespace: scaledObject.GetNamespace(), - Labels: map[string]string{ - "app.kubernetes.io/name": scaledObject.GetName(), - "app.kubernetes.io/version": version.Version, - "app.kubernetes.io/part-of": scaledObject.GetName(), - "app.kubernetes.io/managed-by": "keda-operator", - "scaledobject": scaledObject.GetName(), - }, - }, - Spec: *scaledObject.Spec.JobTargetRef.DeepCopy(), - } - - // Job doesn't allow RestartPolicyAlways, it seems like this value is set by the client as a default one, - // we should set this property to allowed value in that case - if job.Spec.Template.Spec.RestartPolicy == "" { - h.logger.V(1).Info("Job RestartPolicy is not set, setting it to 'OnFailure', to avoid setting it to the client's default value 'Always'") - job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure - } - - // Set ScaledObject instance as the owner and controller - err := controllerutil.SetControllerReference(scaledObject, job, h.reconcilerScheme) - if err != nil { - h.logger.Error(err, "Failed to set ScaledObject as the owner of the new Job") - } - - err = h.client.Create(context.TODO(), job) - if err != nil { - h.logger.Error(err, "Failed to create a new Job") - - } - } - h.logger.Info("Created jobs", "Number of jobs", scaleTo) - -} - -func (h *ScaleHandler) resolveJobEnv(scaledObject *kedav1alpha1.ScaledObject) (map[string]string, error) { - - if len(scaledObject.Spec.JobTargetRef.Template.Spec.Containers) < 1 { - return nil, fmt.Errorf("Scaled Object (%s) doesn't have containers", scaledObject.GetName()) - } - - container := 
scaledObject.Spec.JobTargetRef.Template.Spec.Containers[0] - - return h.resolveEnv(&container, scaledObject.GetNamespace()) -} - -func (h *ScaleHandler) parseJobAuthRef(triggerAuthRef *kedav1alpha1.ScaledObjectAuthRef, scaledObject *kedav1alpha1.ScaledObject) (map[string]string, string) { - return h.parseAuthRef(triggerAuthRef, scaledObject, func(name, containerName string) string { - env, err := h.resolveJobEnv(scaledObject) - if err != nil { - return "" - } - return env[name] - }) -} - -func (h *ScaleHandler) isJobFinished(j *batchv1.Job) bool { - for _, c := range j.Status.Conditions { - if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue { - return true - } - } - return false -} - -func (h *ScaleHandler) getRunningJobCount(scaledObject *kedav1alpha1.ScaledObject, maxScale int64) int64 { - var runningJobs int64 - - opts := []client.ListOption{ - client.InNamespace(scaledObject.GetNamespace()), - client.MatchingLabels(map[string]string{"scaledobject": scaledObject.GetName()}), - } - - jobs := &batchv1.JobList{} - err := h.client.List(context.TODO(), jobs, opts...) 
- - if err != nil { - return 0 - } - - for _, job := range jobs.Items { - if !h.isJobFinished(&job) { - runningJobs++ - } - } - - return runningJobs -} diff --git a/pkg/handler/scale_loop.go b/pkg/handler/scale_loop.go deleted file mode 100644 index 4eb0069d42f..00000000000 --- a/pkg/handler/scale_loop.go +++ /dev/null @@ -1,131 +0,0 @@ -package handler - -import ( - "context" - "time" - - kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" -) - -// HandleScaleLoop blocks forever and checks the scaledObject based on its pollingInterval -func (h *ScaleHandler) HandleScaleLoop(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject) { - h.logger = h.logger.WithValues("ScaledObject.Namespace", scaledObject.Namespace, "ScaledObject.Name", scaledObject.Name, "ScaledObject.ScaleType", scaledObject.Spec.ScaleType) - - h.handleScale(ctx, scaledObject) - - var pollingInterval time.Duration - if scaledObject.Spec.PollingInterval != nil { - pollingInterval = time.Second * time.Duration(*scaledObject.Spec.PollingInterval) - } else { - pollingInterval = time.Second * time.Duration(defaultPollingInterval) - } - - h.logger.V(1).Info("Watching scaledObject with pollingInterval", "ScaledObject.PollingInterval", pollingInterval) - - for { - select { - case <-time.After(pollingInterval): - h.handleScale(ctx, scaledObject) - case <-ctx.Done(): - h.logger.V(1).Info("Context for scaledObject canceled") - return - } - } -} - -// handleScale contains the main logic for the ScaleHandler scaling logic. 
-// It'll check each trigger active status then call scaleDeployment -func (h *ScaleHandler) handleScale(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject) { - - switch scaledObject.Spec.ScaleType { - case kedav1alpha1.ScaleTypeJob: - h.handleScaleJob(ctx, scaledObject) - break - default: - h.handleScaleDeployment(ctx, scaledObject) - } - return -} - -func (h *ScaleHandler) handleScaleJob(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject) { - //TODO: need to actually handle the scale here - h.logger.V(1).Info("Handle Scale Job called") - scalers, err := h.getJobScalers(scaledObject) - - if err != nil { - h.logger.Error(err, "Error getting scalers") - return - } - - isScaledObjectActive := false - h.logger.Info("Scalers count", "Count", len(scalers)) - var queueLength int64 - var maxValue int64 - - for _, scaler := range scalers { - scalerLogger := h.logger.WithValues("Scaler", scaler) - - isTriggerActive, err := scaler.IsActive(ctx) - scalerLogger.Info("Active trigger", "isTriggerActive", isTriggerActive) - metricSpecs := scaler.GetMetricSpecForScaling() - - var metricValue int64 - for _, metric := range metricSpecs { - metricValue, _ = metric.External.TargetAverageValue.AsInt64() - maxValue += metricValue - } - scalerLogger.Info("Scaler max value", "MaxValue", maxValue) - - metrics, _ := scaler.GetMetrics(ctx, "queueLength", nil) - - for _, m := range metrics { - if m.MetricName == "queueLength" { - metricValue, _ = m.Value.AsInt64() - queueLength += metricValue - } - } - scalerLogger.Info("QueueLength Metric value", "queueLength", queueLength) - - if err != nil { - scalerLogger.V(1).Info("Error getting scale decision, but continue", "Error", err) - continue - } else if isTriggerActive { - isScaledObjectActive = true - scalerLogger.Info("Scaler is active") - } - scaler.Close() - } - - h.scaleJobs(scaledObject, isScaledObjectActive, queueLength, maxValue) -} - -// handleScaleDeployment contains the main logic for the ScaleHandler scaling 
logic. -// It'll check each trigger active status then call scaleDeployment -func (h *ScaleHandler) handleScaleDeployment(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject) { - scalers, deployment, err := h.GetDeploymentScalers(scaledObject) - - if deployment == nil { - return - } - if err != nil { - h.logger.Error(err, "Error getting scalers") - return - } - - isScaledObjectActive := false - - for _, scaler := range scalers { - defer scaler.Close() - isTriggerActive, err := scaler.IsActive(ctx) - - if err != nil { - h.logger.V(1).Info("Error getting scale decision", "Error", err) - continue - } else if isTriggerActive { - isScaledObjectActive = true - h.logger.V(1).Info("Scaler for scaledObject is active", "Scaler", scaler) - } - } - - h.scaleDeployment(deployment, scaledObject, isScaledObjectActive) -} diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index cc6a6fbf7cf..08a0141701a 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -6,7 +6,7 @@ import ( "strings" kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" - "github.com/kedacore/keda/pkg/handler" + "github.com/kedacore/keda/pkg/scaling" "github.com/go-logr/logr" "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider" @@ -23,7 +23,7 @@ type KedaProvider struct { client client.Client values map[provider.CustomMetricInfo]int64 externalMetrics []externalMetric - scaleHandler *handler.ScaleHandler + scaleHandler scaling.ScaleHandler watchedNamespace string } type externalMetric struct { @@ -35,7 +35,7 @@ type externalMetric struct { var logger logr.Logger // NewProvider returns an instance of KedaProvider -func NewProvider(adapterLogger logr.Logger, scaleHandler *handler.ScaleHandler, client client.Client, watchedNamespace string) provider.MetricsProvider { +func NewProvider(adapterLogger logr.Logger, scaleHandler scaling.ScaleHandler, client client.Client, watchedNamespace string) provider.MetricsProvider { provider := &KedaProvider{ 
values: make(map[provider.CustomMetricInfo]int64), externalMetrics: make([]externalMetric, 2, 10), @@ -78,7 +78,7 @@ func (p *KedaProvider) GetExternalMetric(namespace string, metricSelector labels scaledObject := &scaledObjects.Items[0] matchingMetrics := []external_metrics.ExternalMetricValue{} - scalers, _, err := p.scaleHandler.GetDeploymentScalers(scaledObject) + scalers, err := p.scaleHandler.GetScalers(scaledObject) if err != nil { return nil, fmt.Errorf("Error when getting scalers %s", err) } @@ -88,7 +88,7 @@ func (p *KedaProvider) GetExternalMetric(namespace string, metricSelector labels for _, metricSpec := range metricSpecs { // Filter only the desired metric - if strings.EqualFold(metricSpec.External.MetricName, info.Metric) { + if strings.EqualFold(metricSpec.External.Metric.Name, info.Metric) { metrics, err := scaler.GetMetrics(context.TODO(), info.Metric, metricSelector) if err != nil { logger.Error(err, "error getting metric for scaler", "ScaledObject.Namespace", scaledObject.Namespace, "ScaledObject.Name", scaledObject.Name, "Scaler", scaler) diff --git a/pkg/scalers/artemis_scaler.go b/pkg/scalers/artemis_scaler.go index 0ed3fc57fad..88388840d74 100644 --- a/pkg/scalers/artemis_scaler.go +++ b/pkg/scalers/artemis_scaler.go @@ -9,7 +9,7 @@ import ( "strconv" "time" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -180,16 +180,19 @@ func (s *artemisScaler) getQueueMessageCount() (int, error) { return messageCount, nil } -func (s *artemisScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { - return []v2beta1.MetricSpec{ - { - External: &v2beta1.ExternalMetricSource{ - MetricName: artemisQueueLengthMetricName, - TargetAverageValue: resource.NewQuantity(int64(s.metadata.queueLength), resource.DecimalSI), - }, - Type: artemisMetricType, +func (s *artemisScaler) GetMetricSpecForScaling() 
[]v2beta2.MetricSpec { + targetMetricValue := resource.NewQuantity(int64(s.metadata.queueLength), resource.DecimalSI) + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: artemisQueueLengthMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, }, } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: artemisMetricType} + return []v2beta2.MetricSpec{metricSpec} } //GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/aws_cloudwatch_scaler.go b/pkg/scalers/aws_cloudwatch_scaler.go index b19012e3acc..111cef3564c 100644 --- a/pkg/scalers/aws_cloudwatch_scaler.go +++ b/pkg/scalers/aws_cloudwatch_scaler.go @@ -12,7 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatch" - "k8s.io/api/autoscaling/v2beta1" + "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -172,13 +172,20 @@ func (c *awsCloudwatchScaler) GetMetrics(ctx context.Context, metricName string, return append([]external_metrics.ExternalMetricValue{}, metric), nil } -func (c *awsCloudwatchScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (c *awsCloudwatchScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetMetricValue := resource.NewQuantity(int64(c.metadata.targetMetricValue), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: fmt.Sprintf("%s-%s-%s", strings.ReplaceAll(c.metadata.namespace, "/", "-"), - c.metadata.dimensionName, c.metadata.dimensionValue), - TargetAverageValue: targetMetricValue} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := 
&v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: fmt.Sprintf("%s-%s-%s", strings.ReplaceAll(c.metadata.namespace, "/", "-"), + c.metadata.dimensionName, c.metadata.dimensionValue), + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } func (c *awsCloudwatchScaler) IsActive(ctx context.Context) (bool, error) { diff --git a/pkg/scalers/aws_kinesis_stream_scaler.go b/pkg/scalers/aws_kinesis_stream_scaler.go index 8740f427e36..2cda2238247 100644 --- a/pkg/scalers/aws_kinesis_stream_scaler.go +++ b/pkg/scalers/aws_kinesis_stream_scaler.go @@ -11,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -100,12 +100,19 @@ func (s *awsKinesisStreamScaler) Close() error { return nil } -func (s *awsKinesisStreamScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *awsKinesisStreamScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetShardCountQty := resource.NewQuantity(int64(s.metadata.targetShardCount), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: fmt.Sprintf("%s-%s-%s", "AWS-Kinesis-Stream", awsKinesisStreamMetricName, s.metadata.streamName), - TargetAverageValue: targetShardCountQty} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: fmt.Sprintf("%s-%s-%s", "AWS-Kinesis-Stream", awsKinesisStreamMetricName, s.metadata.streamName), + }, + 
Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetShardCountQty, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } //GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go index 69f18b31d0c..9e4c9dd48df 100644 --- a/pkg/scalers/aws_sqs_queue_scaler.go +++ b/pkg/scalers/aws_sqs_queue_scaler.go @@ -13,7 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sqs" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -116,12 +116,19 @@ func (s *awsSqsQueueScaler) Close() error { return nil } -func (s *awsSqsQueueScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *awsSqsQueueScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetQueueLengthQty := resource.NewQuantity(int64(s.metadata.targetQueueLength), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: fmt.Sprintf("%s-%s-%s", "AWS-SQS-Queue", awsSqsQueueMetricName, s.metadata.queueName), - TargetAverageValue: targetQueueLengthQty} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: fmt.Sprintf("%s-%s-%s", "AWS-SQS-Queue", awsSqsQueueMetricName, s.metadata.queueName), + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetQueueLengthQty, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } 
//GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/azure/azure_monitor_test.go b/pkg/scalers/azure/azure_monitor_test.go index 453fa372d82..cf0fbb4d957 100644 --- a/pkg/scalers/azure/azure_monitor_test.go +++ b/pkg/scalers/azure/azure_monitor_test.go @@ -24,7 +24,7 @@ var testExtractAzMonitordata = []testExtractAzMonitorTestData{ {"Average Aggregation requested", false, 41, azureExternalMetricRequest{Aggregation: "Average"}, insights.Response{Value: &[]insights.Metric{insights.Metric{Timeseries: &[]insights.TimeSeriesElement{insights.TimeSeriesElement{Data: &[]insights.MetricValue{insights.MetricValue{Average: returnFloat64Ptr(41)}}}}}}}}, {"Maximum Aggregation requested", false, 42, azureExternalMetricRequest{Aggregation: "Maximum"}, insights.Response{Value: &[]insights.Metric{insights.Metric{Timeseries: &[]insights.TimeSeriesElement{insights.TimeSeriesElement{Data: &[]insights.MetricValue{insights.MetricValue{Maximum: returnFloat64Ptr(42)}}}}}}}}, {"Minimum Aggregation requested", false, 43, azureExternalMetricRequest{Aggregation: "Minimum"}, insights.Response{Value: &[]insights.Metric{insights.Metric{Timeseries: &[]insights.TimeSeriesElement{insights.TimeSeriesElement{Data: &[]insights.MetricValue{insights.MetricValue{Minimum: returnFloat64Ptr(43)}}}}}}}}, - {"Count Aggregation requested", false, 44, azureExternalMetricRequest{Aggregation: "Count"}, insights.Response{Value: &[]insights.Metric{insights.Metric{Timeseries: &[]insights.TimeSeriesElement{insights.TimeSeriesElement{Data: &[]insights.MetricValue{insights.MetricValue{Count: returnint64Ptr(44)}}}}}}}}, + {"Count Aggregation requested", false, 44, azureExternalMetricRequest{Aggregation: "Count"}, insights.Response{Value: &[]insights.Metric{insights.Metric{Timeseries: &[]insights.TimeSeriesElement{insights.TimeSeriesElement{Data: &[]insights.MetricValue{insights.MetricValue{Count: returnFloat64Ptr(44)}}}}}}}}, } func 
returnFloat64Ptr(x float64) *float64 { diff --git a/pkg/scalers/azure_blob_scaler.go b/pkg/scalers/azure_blob_scaler.go index c35c1012207..c311be4d7f3 100644 --- a/pkg/scalers/azure_blob_scaler.go +++ b/pkg/scalers/azure_blob_scaler.go @@ -6,7 +6,7 @@ import ( "github.com/kedacore/keda/pkg/scalers/azure" "strconv" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -151,11 +151,19 @@ func (s *azureBlobScaler) Close() error { return nil } -func (s *azureBlobScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *azureBlobScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetBlobCount := resource.NewQuantity(int64(s.metadata.targetBlobCount), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: blobCountMetricName, TargetAverageValue: targetBlobCount} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: blobCountMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetBlobCount, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } //GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/azure_eventhub_scaler.go b/pkg/scalers/azure_eventhub_scaler.go index 161ce7c6602..a17870386d6 100644 --- a/pkg/scalers/azure_eventhub_scaler.go +++ b/pkg/scalers/azure_eventhub_scaler.go @@ -11,7 +11,7 @@ import ( eventhub "github.com/Azure/azure-event-hubs-go" "github.com/Azure/azure-storage-blob-go/azblob" - "k8s.io/api/autoscaling/v2beta1" + "k8s.io/api/autoscaling/v2beta2" 
"k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -205,16 +205,19 @@ func (scaler *AzureEventHubScaler) IsActive(ctx context.Context) (bool, error) { } // GetMetricSpecForScaling returns metric spec -func (scaler *AzureEventHubScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { - return []v2beta1.MetricSpec{ - { - External: &v2beta1.ExternalMetricSource{ - MetricName: thresholdMetricName, - TargetAverageValue: resource.NewQuantity(scaler.metadata.threshold, resource.DecimalSI), - }, - Type: eventHubMetricType, +func (scaler *AzureEventHubScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { + targetMetricVal := resource.NewQuantity(scaler.metadata.threshold, resource.DecimalSI) + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: thresholdMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricVal, }, } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: eventHubMetricType} + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics returns metric using total number of unprocessed events in event hub diff --git a/pkg/scalers/azure_monitor_scaler.go b/pkg/scalers/azure_monitor_scaler.go index cbab9c36603..9ff0e0f280d 100644 --- a/pkg/scalers/azure_monitor_scaler.go +++ b/pkg/scalers/azure_monitor_scaler.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -163,11 +163,19 @@ func (s *azureMonitorScaler) Close() error { return nil } -func (s *azureMonitorScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *azureMonitorScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetMetricVal := resource.NewQuantity(int64(s.metadata.targetValue), resource.DecimalSI) - 
externalMetric := &v2beta1.ExternalMetricSource{MetricName: azureMonitorMetricName, TargetAverageValue: targetMetricVal} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: azureMonitorMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricVal, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go index 7102220a608..7ac73112127 100644 --- a/pkg/scalers/azure_queue_scaler.go +++ b/pkg/scalers/azure_queue_scaler.go @@ -6,7 +6,7 @@ import ( "github.com/kedacore/keda/pkg/scalers/azure" "strconv" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -132,11 +132,19 @@ func (s *azureQueueScaler) Close() error { return nil } -func (s *azureQueueScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *azureQueueScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetQueueLengthQty := resource.NewQuantity(int64(s.metadata.targetQueueLength), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: queueLengthMetricName, TargetAverageValue: targetQueueLengthQty} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: queueLengthMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + 
AverageValue: targetQueueLengthQty, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } //GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/azure_servicebus_scaler.go b/pkg/scalers/azure_servicebus_scaler.go index e04a52768ed..ab2d0eb1613 100755 --- a/pkg/scalers/azure_servicebus_scaler.go +++ b/pkg/scalers/azure_servicebus_scaler.go @@ -8,8 +8,8 @@ import ( servicebus "github.com/Azure/azure-service-bus-go" - "github.com/Azure/azure-amqp-common-go/v2/auth" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + "github.com/Azure/azure-amqp-common-go/v3/auth" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -144,11 +144,19 @@ func (s *azureServiceBusScaler) Close() error { } // Returns the metric spec to be used by the HPA -func (s *azureServiceBusScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *azureServiceBusScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetLengthQty := resource.NewQuantity(int64(s.metadata.targetLength), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: queueLengthMetricName, TargetAverageValue: targetLengthQty} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: queueLengthMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetLengthQty, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } // Returns the current metrics to be served to the HPA diff --git a/pkg/scalers/cron_scaler.go 
b/pkg/scalers/cron_scaler.go index 55344b41a32..ead164c3296 100644 --- a/pkg/scalers/cron_scaler.go +++ b/pkg/scalers/cron_scaler.go @@ -7,7 +7,7 @@ import ( "time" "github.com/robfig/cron/v3" - "k8s.io/api/autoscaling/v2beta1" + "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -129,17 +129,20 @@ func (s *cronScaler) Close() error { } // GetMetricSpecForScaling returns the metric spec for the HPA -func (s *cronScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *cronScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { specReplicas := 1 - return []v2beta1.MetricSpec{ - { - External: &v2beta1.ExternalMetricSource{ - MetricName: cronMetricName, - TargetAverageValue: resource.NewQuantity(int64(specReplicas), resource.DecimalSI), - }, - Type: cronMetricType, + targetMetricValue := resource.NewQuantity(int64(specReplicas), resource.DecimalSI) + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: cronMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, }, } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: cronMetricType} + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics finds the current value of the metric diff --git a/pkg/scalers/external_scaler.go b/pkg/scalers/external_scaler.go index 13865c69164..d45f8cb3cc9 100644 --- a/pkg/scalers/external_scaler.go +++ b/pkg/scalers/external_scaler.go @@ -3,11 +3,14 @@ package scalers import ( "context" "fmt" + "github.com/mitchellh/hashstructure" + "sync" + "time" pb "github.com/kedacore/keda/pkg/scalers/externalscaler" "google.golang.org/grpc" "google.golang.org/grpc/credentials" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/labels" @@ -16,95 +19,95 @@ import ( ) type externalScaler struct { - metadata *externalScalerMetadata + metadata externalScalerMetadata scaledObjectRef pb.ScaledObjectRef - grpcClient pb.ExternalScalerClient - grpcConnection *grpc.ClientConn +} + +type externalPushScaler struct { + externalScaler } type externalScalerMetadata struct { - scalerAddress string - tlsCertFile string - metadata map[string]string + scalerAddress string + tlsCertFile string + originalMetadata map[string]string } +type connectionGroup struct { + grpcConnection *grpc.ClientConn + waitGroup *sync.WaitGroup +} + +// a pool of connectionGroup per metadata hash +var connectionPool sync.Map + var externalLog = logf.Log.WithName("external_scaler") // NewExternalScaler creates a new external scaler - calls the GRPC interface // to create a new scaler -func NewExternalScaler(name, namespace string, resolvedEnv, metadata map[string]string) (Scaler, error) { - - meta, err := parseExternalScalerMetadata(metadata, resolvedEnv) +func NewExternalScaler(name, namespace string, metadata map[string]string) (Scaler, error) { + meta, err := parseExternalScalerMetadata(metadata) if err != nil { return nil, fmt.Errorf("error parsing external scaler metadata: %s", err) } - scaler := &externalScaler{ + return &externalScaler{ metadata: meta, scaledObjectRef: pb.ScaledObjectRef{ Name: name, Namespace: namespace, }, - } - - // TODO: Pass Context - ctx := context.Background() - - // Call GRPC Interface to parse metadata - err = scaler.getGRPCClient() - if err != nil { - return nil, err - } - - request := &pb.NewRequest{ - ScaledObjectRef: &scaler.scaledObjectRef, - Metadata: scaler.metadata.metadata, - } + }, nil +} - _, err = scaler.grpcClient.New(ctx, request) +func NewExternalPushScaler(name, namespace string, metadata map[string]string) (PushScaler, error) { + meta, err := parseExternalScalerMetadata(metadata) if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing 
external scaler metadata: %s", err) } - return scaler, nil + return &externalPushScaler{ + externalScaler{ + metadata: meta, + scaledObjectRef: pb.ScaledObjectRef{ + Name: name, + Namespace: namespace, + ScalerMetadata: meta.originalMetadata, + }, + }, + }, nil } -func parseExternalScalerMetadata(metadata, resolvedEnv map[string]string) (*externalScalerMetadata, error) { - meta := externalScalerMetadata{} +func parseExternalScalerMetadata(metadata map[string]string) (externalScalerMetadata, error) { + meta := externalScalerMetadata{ + originalMetadata: metadata, + } // Check if scalerAddress is present if val, ok := metadata["scalerAddress"]; ok && val != "" { meta.scalerAddress = val } else { - return nil, fmt.Errorf("Scaler Address is a required field") + return meta, fmt.Errorf("scaler Address is a required field") } if val, ok := metadata["tlsCertFile"]; ok && val != "" { meta.tlsCertFile = val } - meta.metadata = make(map[string]string) - - // Add elements to metadata - for key, value := range metadata { - // Check if key is in resolved environment and resolve - if val, ok := resolvedEnv[value]; ok && val != "" { - meta.metadata[key] = val - } else { - meta.metadata[key] = value - } - } - - return &meta, nil + return meta, nil } // IsActive checks if there are any messages in the subscription func (s *externalScaler) IsActive(ctx context.Context) (bool, error) { + grpcClient, done, err := getClientForConnectionPool(s.metadata) + if err != nil { + return false, err + } + defer done() - // Call GRPC Interface to check if active - response, err := s.grpcClient.IsActive(ctx, &s.scaledObjectRef) + response, err := grpcClient.IsActive(ctx, &s.scaledObjectRef) if err != nil { - externalLog.Error(err, "error") + externalLog.Error(err, "error calling IsActive on external scaler") return false, err } @@ -112,47 +115,42 @@ func (s *externalScaler) IsActive(ctx context.Context) (bool, error) { } func (s *externalScaler) Close() error { - // Call GRPC Interface to close 
connection - - // TODO: Pass Context - ctx := context.Background() - - _, err := s.grpcClient.Close(ctx, &s.scaledObjectRef) - if err != nil { - externalLog.Error(err, "error") - return err - } - defer s.grpcConnection.Close() - return nil } // GetMetricSpecForScaling returns the metric spec for the HPA -func (s *externalScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *externalScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { + var result []v2beta2.MetricSpec - // TODO: Pass Context - ctx := context.Background() + grpcClient, done, err := getClientForConnectionPool(s.metadata) + if err != nil { + externalLog.Error(err, "error building grpc connection") + return result + } + defer done() - // Call GRPC Interface to get metric specs - response, err := s.grpcClient.GetMetricSpec(ctx, &s.scaledObjectRef) + response, err := grpcClient.GetMetricSpec(context.TODO(), &s.scaledObjectRef) if err != nil { externalLog.Error(err, "error") return nil } - var result []v2beta1.MetricSpec - for _, spec := range response.MetricSpecs { // Construct the target subscription size as a quantity qty := resource.NewQuantity(int64(spec.TargetSize), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{ - MetricName: spec.MetricName, - TargetAverageValue: qty, + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: spec.MetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: qty, + }, } // Create the metric spec for the HPA - metricSpec := v2beta1.MetricSpec{ + metricSpec := v2beta2.MetricSpec{ External: externalMetric, Type: externalMetricType, } @@ -165,16 +163,19 @@ func (s *externalScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { // GetMetrics connects calls the gRPC interface to get the metrics with a specific name func (s *externalScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) 
([]external_metrics.ExternalMetricValue, error) { - var metrics []external_metrics.ExternalMetricValue - // Call GRPC Interface to get metric specs + grpcClient, done, err := getClientForConnectionPool(s.metadata) + if err != nil { + return metrics, err + } + defer done() request := &pb.GetMetricsRequest{ MetricName: metricName, ScaledObjectRef: &s.scaledObjectRef, } - response, err := s.grpcClient.GetMetrics(ctx, request) + response, err := grpcClient.GetMetrics(ctx, request) if err != nil { externalLog.Error(err, "error") return []external_metrics.ExternalMetricValue{}, err @@ -193,27 +194,122 @@ func (s *externalScaler) GetMetrics(ctx context.Context, metricName string, metr return metrics, nil } -// getGRPCClient creates a new gRPC client -func (s *externalScaler) getGRPCClient() error { +// handleIsActiveStream is the only writer to the active channel and will close it on return. +func (s *externalPushScaler) Run(ctx context.Context, active chan<- bool) { + defer close(active) + // It's possible for the connection to get terminated anytime, we need to run this in a retry loop + runWithLog := func() { + grpcClient, done, err := getClientForConnectionPool(s.metadata) + if err != nil { + externalLog.Error(err, "error running internalRun") + return + } + handleIsActiveStream(ctx, s.scaledObjectRef, grpcClient, active) + done() + } - var err error + // retry on error from runWithLog() starting by 2 sec backing off * 2 with a max of 1 minute + retryDuration := time.Second * 2 + retryBackoff := func() <-chan time.Time { + ch := time.After(retryDuration) + retryDuration *= time.Second * 2 + if retryDuration > time.Minute*1 { + retryDuration = time.Minute * 1 + } + return ch + } - if s.metadata.tlsCertFile != "" { - certFile := fmt.Sprintf("/grpccerts/%s", s.metadata.tlsCertFile) - creds, err := credentials.NewClientTLSFromFile(certFile, "") + // start the first run without delay + runWithLog() + + for { + select { + case <-ctx.Done(): + return + case <-retryBackoff(): 
+ runWithLog() + } + } +} + +// handleIsActiveStream calls blocks on a stream call from the GRPC server. It'll only terminate on error, stream completion, or ctx cancellation. +func handleIsActiveStream(ctx context.Context, scaledObjectRef pb.ScaledObjectRef, grpcClient pb.ExternalScalerClient, active chan<- bool) error { + stream, err := grpcClient.StreamIsActive(ctx, &scaledObjectRef) + if err != nil { + return err + } + + for { + resp, err := stream.Recv() if err != nil { return err } - s.grpcConnection, err = grpc.Dial(s.metadata.scalerAddress, grpc.WithTransportCredentials(creds)) - } else { - s.grpcConnection, err = grpc.Dial(s.metadata.scalerAddress, grpc.WithInsecure()) + + active <- resp.Result + } +} + +var connectionPoolMutex sync.Mutex + +// getClientForConnectionPool returns a grpcClient and a done() Func. The done() function must be called once the client is no longer +// in use to clean up the shared grpc.ClientConn +func getClientForConnectionPool(metadata externalScalerMetadata) (pb.ExternalScalerClient, func(), error) { + connectionPoolMutex.Lock() + defer connectionPoolMutex.Unlock() + + buildGRPCConnection := func(metadata externalScalerMetadata) (*grpc.ClientConn, error) { + if metadata.tlsCertFile != "" { + creds, err := credentials.NewClientTLSFromFile(metadata.tlsCertFile, "") + if err != nil { + return nil, err + } + return grpc.Dial(metadata.scalerAddress, grpc.WithTransportCredentials(creds)) + } + + return grpc.Dial(metadata.scalerAddress, grpc.WithInsecure()) } + // create a unique key per-metadata. 
If scaledObjects share the same connection properties + // in the metadata, they will share the same grpc.ClientConn + key, err := hashstructure.Hash(metadata, nil) if err != nil { - return fmt.Errorf("cannot connect to external scaler over grpc interface: %s", err) + return nil, nil, err } - s.grpcClient = pb.NewExternalScalerClient(s.grpcConnection) + if i, ok := connectionPool.Load(key); ok { + if connGroup, ok := i.(*connectionGroup); ok { + connGroup.waitGroup.Add(1) + return pb.NewExternalScalerClient(connGroup.grpcConnection), func() { + connGroup.waitGroup.Done() + }, nil + } + } - return nil + conn, err := buildGRPCConnection(metadata) + if err != nil { + return nil, nil, err + } + + waitGroup := &sync.WaitGroup{} + waitGroup.Add(1) + connGroup := connectionGroup{ + grpcConnection: conn, + waitGroup: waitGroup, + } + + connectionPool.Store(key, connGroup) + + go func() { + // clean up goroutine. + // once all waitGroup is done, remove the connection from the pool and Close() grpc.ClientConn + connGroup.waitGroup.Wait() + connectionPoolMutex.Lock() + defer connectionPoolMutex.Unlock() + connectionPool.Delete(key) + connGroup.grpcConnection.Close() + }() + + return pb.NewExternalScalerClient(connGroup.grpcConnection), func() { + connGroup.waitGroup.Done() + }, nil } diff --git a/pkg/scalers/external_scaler_test.go b/pkg/scalers/external_scaler_test.go index f8e948e2024..304c2979a6d 100644 --- a/pkg/scalers/external_scaler_test.go +++ b/pkg/scalers/external_scaler_test.go @@ -1,7 +1,16 @@ package scalers import ( + "context" + "fmt" + pb "github.com/kedacore/keda/pkg/scalers/externalscaler" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "net" + "sync/atomic" "testing" + "time" ) var testExternalScalerResolvedEnv map[string]string @@ -21,7 +30,7 @@ var testExternalScalerMetadata = []parseExternalScalerMetadataTestData{ func TestExternalScalerParseMetadata(t *testing.T) { for _, testData := range 
testExternalScalerMetadata { - _, err := parseExternalScalerMetadata(testData.metadata, testExternalScalerResolvedEnv) + _, err := parseExternalScalerMetadata(testData.metadata) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -30,3 +39,136 @@ func TestExternalScalerParseMetadata(t *testing.T) { } } } + +func TestExternalPushScaler_Run(t *testing.T) { + const serverCount = 5 + const iterationCount = 500 + + servers := createGRPCServers(serverCount, t) + replyCh := createIsActiveChannels(serverCount * iterationCount) + + // we will send serverCount * iterationCount 'isActiveResponse' and expect resultCount == serverCount * iterationCount + var resultCount int64 + + ctx, cancel := context.WithCancel(context.Background()) + for i := 0; i < serverCount*iterationCount; i++ { + id := i % serverCount + pushScaler, _ := NewExternalPushScaler("app", "namespace", map[string]string{"scalerAddress": servers[id].address}) + go pushScaler.Run(ctx, replyCh[i]) + } + + // scaler consumer + for i, ch := range replyCh { + go func(c chan bool, id int) { + for msg := range c { + if msg { + atomic.AddInt64(&resultCount, 1) + } + } + }(ch, i) + } + + // producer + for _, s := range servers { + go func(c chan bool) { + for i := 0; i < iterationCount; i++ { + c <- true + } + }(s.publish) + } + + retries := 0 + defer cancel() + for { + select { + case <-time.After(time.Second * 1): + if resultCount == serverCount*iterationCount { + t.Logf("resultCount == %d", resultCount) + return + } + + retries++ + if retries > 10 { + t.Fatalf("Expected resultCount to be %d after %d retries, but got %d", serverCount*iterationCount, retries, resultCount) + return + } + } + } +} + +type testServer struct { + grpcServer *grpc.Server + address string + publish chan bool +} + +func createGRPCServers(count int, t *testing.T) []testServer { + result := make([]testServer, 0, count) + + for i := 0; i < count; i++ { + grpcServer := grpc.NewServer() + address := 
fmt.Sprintf("127.0.0.1:%d", 5050+i) + lis, _ := net.Listen("tcp", address) + activeCh := make(chan bool) + pb.RegisterExternalScalerServer(grpcServer, &testExternalScaler{ + t: t, + active: activeCh, + }) + + go func() { + if err := grpcServer.Serve(lis); err != nil { + t.Error(err, "error from grpcServer") + } + }() + + result = append(result, testServer{ + grpcServer: grpcServer, + address: address, + publish: activeCh, + }) + } + + return result +} + +func createIsActiveChannels(count int) []chan bool { + result := make([]chan bool, 0, count) + for i := 0; i < count; i++ { + result = append(result, make(chan bool)) + } + + return result +} + +type testExternalScaler struct { + t *testing.T + active chan bool +} + +func (e *testExternalScaler) IsActive(context.Context, *pb.ScaledObjectRef) (*pb.IsActiveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IsActive not implemented") +} +func (e *testExternalScaler) StreamIsActive(_ *pb.ScaledObjectRef, epsServer pb.ExternalScaler_StreamIsActiveServer) error { + for { + select { + case <-epsServer.Context().Done(): + // the call completed? 
exit + return nil + case i := <-e.active: + err := epsServer.Send(&pb.IsActiveResponse{ + Result: i, + }) + if err != nil { + e.t.Error(err) + } + } + } +} + +func (e *testExternalScaler) GetMetricSpec(context.Context, *pb.ScaledObjectRef) (*pb.GetMetricSpecResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMetricSpec not implemented") +} + +func (e *testExternalScaler) GetMetrics(context.Context, *pb.GetMetricsRequest) (*pb.GetMetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMetrics not implemented") +} diff --git a/pkg/scalers/externalscaler/externalscaler.pb.go b/pkg/scalers/externalscaler/externalscaler.pb.go index 6263daec943..c01bc76e674 100644 --- a/pkg/scalers/externalscaler/externalscaler.pb.go +++ b/pkg/scalers/externalscaler/externalscaler.pb.go @@ -7,7 +7,6 @@ import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" - empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -26,11 +25,12 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type ScaledObjectRef struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + ScalerMetadata map[string]string `protobuf:"bytes,3,rep,name=scalerMetadata,proto3" json:"scalerMetadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` 
} func (m *ScaledObjectRef) Reset() { *m = ScaledObjectRef{} } @@ -72,49 +72,9 @@ func (m *ScaledObjectRef) GetNamespace() string { return "" } -type NewRequest struct { - ScaledObjectRef *ScaledObjectRef `protobuf:"bytes,1,opt,name=scaledObjectRef,proto3" json:"scaledObjectRef,omitempty"` - Metadata map[string]string `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NewRequest) Reset() { *m = NewRequest{} } -func (m *NewRequest) String() string { return proto.CompactTextString(m) } -func (*NewRequest) ProtoMessage() {} -func (*NewRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3d382708546499d1, []int{1} -} - -func (m *NewRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NewRequest.Unmarshal(m, b) -} -func (m *NewRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NewRequest.Marshal(b, m, deterministic) -} -func (m *NewRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewRequest.Merge(m, src) -} -func (m *NewRequest) XXX_Size() int { - return xxx_messageInfo_NewRequest.Size(m) -} -func (m *NewRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NewRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NewRequest proto.InternalMessageInfo - -func (m *NewRequest) GetScaledObjectRef() *ScaledObjectRef { - if m != nil { - return m.ScaledObjectRef - } - return nil -} - -func (m *NewRequest) GetMetadata() map[string]string { +func (m *ScaledObjectRef) GetScalerMetadata() map[string]string { if m != nil { - return m.Metadata + return m.ScalerMetadata } return nil } @@ -130,7 +90,7 @@ func (m *IsActiveResponse) Reset() { *m = IsActiveResponse{} } func (m *IsActiveResponse) String() string { return proto.CompactTextString(m) } func (*IsActiveResponse) ProtoMessage() 
{} func (*IsActiveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3d382708546499d1, []int{2} + return fileDescriptor_3d382708546499d1, []int{1} } func (m *IsActiveResponse) XXX_Unmarshal(b []byte) error { @@ -169,7 +129,7 @@ func (m *GetMetricSpecResponse) Reset() { *m = GetMetricSpecResponse{} } func (m *GetMetricSpecResponse) String() string { return proto.CompactTextString(m) } func (*GetMetricSpecResponse) ProtoMessage() {} func (*GetMetricSpecResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3d382708546499d1, []int{3} + return fileDescriptor_3d382708546499d1, []int{2} } func (m *GetMetricSpecResponse) XXX_Unmarshal(b []byte) error { @@ -209,7 +169,7 @@ func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (m *MetricSpec) String() string { return proto.CompactTextString(m) } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3d382708546499d1, []int{4} + return fileDescriptor_3d382708546499d1, []int{3} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { @@ -256,7 +216,7 @@ func (m *GetMetricsRequest) Reset() { *m = GetMetricsRequest{} } func (m *GetMetricsRequest) String() string { return proto.CompactTextString(m) } func (*GetMetricsRequest) ProtoMessage() {} func (*GetMetricsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3d382708546499d1, []int{5} + return fileDescriptor_3d382708546499d1, []int{4} } func (m *GetMetricsRequest) XXX_Unmarshal(b []byte) error { @@ -302,7 +262,7 @@ func (m *GetMetricsResponse) Reset() { *m = GetMetricsResponse{} } func (m *GetMetricsResponse) String() string { return proto.CompactTextString(m) } func (*GetMetricsResponse) ProtoMessage() {} func (*GetMetricsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3d382708546499d1, []int{6} + return fileDescriptor_3d382708546499d1, []int{5} } func (m *GetMetricsResponse) XXX_Unmarshal(b []byte) error { @@ -342,7 +302,7 @@ func (m *MetricValue) Reset() { *m = 
MetricValue{} } func (m *MetricValue) String() string { return proto.CompactTextString(m) } func (*MetricValue) ProtoMessage() {} func (*MetricValue) Descriptor() ([]byte, []int) { - return fileDescriptor_3d382708546499d1, []int{7} + return fileDescriptor_3d382708546499d1, []int{6} } func (m *MetricValue) XXX_Unmarshal(b []byte) error { @@ -379,8 +339,7 @@ func (m *MetricValue) GetMetricValue() int64 { func init() { proto.RegisterType((*ScaledObjectRef)(nil), "externalscaler.ScaledObjectRef") - proto.RegisterType((*NewRequest)(nil), "externalscaler.NewRequest") - proto.RegisterMapType((map[string]string)(nil), "externalscaler.NewRequest.MetadataEntry") + proto.RegisterMapType((map[string]string)(nil), "externalscaler.ScaledObjectRef.ScalerMetadataEntry") proto.RegisterType((*IsActiveResponse)(nil), "externalscaler.IsActiveResponse") proto.RegisterType((*GetMetricSpecResponse)(nil), "externalscaler.GetMetricSpecResponse") proto.RegisterType((*MetricSpec)(nil), "externalscaler.MetricSpec") @@ -392,38 +351,35 @@ func init() { func init() { proto.RegisterFile("externalscaler.proto", fileDescriptor_3d382708546499d1) } var fileDescriptor_3d382708546499d1 = []byte{ - // 484 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0x8d, 0x63, 0x5a, 0xa5, 0x13, 0xfa, 0xc1, 0xa8, 0x54, 0x91, 0x8b, 0x20, 0xac, 0x84, 0x14, - 0x71, 0x70, 0xa5, 0x70, 0x41, 0x14, 0x09, 0x41, 0x89, 0x50, 0x25, 0x9a, 0x48, 0x1b, 0xa5, 0x12, - 0xc7, 0x8d, 0x3b, 0x8d, 0x02, 0x4e, 0x6c, 0xbc, 0xeb, 0x96, 0x70, 0xe0, 0x57, 0xf0, 0xeb, 0xf8, - 0x35, 0xc8, 0xeb, 0xef, 0x55, 0x43, 0xb8, 0xf4, 0x64, 0xef, 0xbc, 0x37, 0xb3, 0xf3, 0xde, 0xcc, - 0xc2, 0x21, 0xfd, 0x50, 0x14, 0x2d, 0x85, 0x2f, 0x3d, 0xe1, 0x53, 0xe4, 0x86, 0x51, 0xa0, 0x02, - 0xdc, 0xab, 0x47, 0x9d, 0xe3, 0x59, 0x10, 0xcc, 0x7c, 0x3a, 0xd1, 0xe8, 0x34, 0xbe, 0x3e, 0xa1, - 0x45, 0xa8, 0x56, 0x29, 0x99, 0x9d, 0xc1, 0xfe, 0x38, 0xa1, 0x5d, 0x8d, 0xa6, 0x5f, 
0xc9, 0x53, - 0x9c, 0xae, 0x11, 0xe1, 0xc1, 0x52, 0x2c, 0xa8, 0x63, 0x75, 0xad, 0xde, 0x0e, 0xd7, 0xff, 0xf8, - 0x04, 0x76, 0x92, 0xaf, 0x0c, 0x85, 0x47, 0x9d, 0xa6, 0x06, 0xca, 0x00, 0xfb, 0x63, 0x01, 0x0c, - 0xe9, 0x96, 0xd3, 0xf7, 0x98, 0xa4, 0xc2, 0x73, 0xd8, 0x97, 0xf5, 0x9a, 0xba, 0x56, 0xbb, 0xff, - 0xcc, 0x35, 0x1a, 0x36, 0xae, 0xe6, 0x66, 0x1e, 0x7e, 0x84, 0xd6, 0x82, 0x94, 0xb8, 0x12, 0x4a, - 0x74, 0x9a, 0x5d, 0xbb, 0xd7, 0xee, 0xf7, 0xcc, 0x1a, 0xe5, 0xc5, 0xee, 0x45, 0x46, 0x1d, 0x2c, - 0x55, 0xb4, 0xe2, 0x45, 0xa6, 0x73, 0x0a, 0xbb, 0x35, 0x08, 0x0f, 0xc0, 0xfe, 0x46, 0xab, 0x4c, - 0x61, 0xf2, 0x8b, 0x87, 0xb0, 0x75, 0x23, 0xfc, 0x38, 0x17, 0x97, 0x1e, 0xde, 0x34, 0x5f, 0x5b, - 0xec, 0x25, 0x1c, 0x9c, 0xcb, 0xf7, 0x9e, 0x9a, 0xdf, 0x10, 0x27, 0x19, 0x06, 0x4b, 0x49, 0x78, - 0x04, 0xdb, 0x11, 0xc9, 0xd8, 0x57, 0xba, 0x44, 0x8b, 0x67, 0x27, 0x36, 0x81, 0xc7, 0x9f, 0x48, - 0x5d, 0x90, 0x8a, 0xe6, 0xde, 0x38, 0x24, 0xaf, 0x48, 0x78, 0x0b, 0xed, 0x45, 0x11, 0x95, 0x1d, - 0x4b, 0x4b, 0x71, 0x4c, 0x29, 0x95, 0xc4, 0x2a, 0x9d, 0x7d, 0x06, 0x28, 0x21, 0x7c, 0x0a, 0x90, - 0x82, 0xc3, 0x72, 0x4a, 0x95, 0x48, 0x82, 0x2b, 0x11, 0xcd, 0x48, 0x8d, 0xe7, 0x3f, 0x53, 0x3d, - 0x36, 0xaf, 0x44, 0xd8, 0x2f, 0x78, 0x54, 0x34, 0x29, 0xef, 0x61, 0x66, 0xf5, 0xfe, 0x9a, 0x66, - 0x7f, 0x6c, 0x02, 0x58, 0xbd, 0x3f, 0x73, 0xe8, 0x1d, 0x3c, 0x4c, 0x39, 0x97, 0x89, 0xf3, 0xb9, - 0x45, 0xc7, 0x77, 0x5b, 0xa4, 0x39, 0xbc, 0x96, 0xc0, 0x46, 0xd0, 0xae, 0x80, 0x1b, 0x5d, 0xea, - 0xe6, 0x13, 0xb9, 0x2c, 0xc6, 0x6e, 0xf3, 0x6a, 0xa8, 0xff, 0xdb, 0x86, 0xbd, 0x41, 0x76, 0xbb, - 0x16, 0x1d, 0xe1, 0x29, 0xd8, 0x43, 0xba, 0x45, 0x67, 0xfd, 0x0e, 0x3a, 0x47, 0x6e, 0xfa, 0xdc, - 0xdc, 0xfc, 0xb9, 0xb9, 0x83, 0xe4, 0xb9, 0xb1, 0x06, 0x8e, 0xa0, 0x95, 0x2f, 0x12, 0x6e, 0x72, - 0xd5, 0xe9, 0x9a, 0x04, 0x73, 0x07, 0x59, 0x03, 0xbf, 0xc0, 0x6e, 0x6d, 0xdb, 0x36, 0x57, 0x7d, - 0x61, 0x12, 0xee, 0xdc, 0x56, 0xd6, 0xc0, 0x09, 0x40, 0x39, 0x23, 0x7c, 0xbe, 0x36, 0x2d, 0xdf, - 0x1f, 0x87, 0xfd, 0x8b, 
0x52, 0x94, 0xfd, 0x00, 0x5b, 0x67, 0x7e, 0x20, 0xff, 0x43, 0xff, 0x5a, - 0x1b, 0xa7, 0xdb, 0x3a, 0xf2, 0xea, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x69, 0x62, 0xa3, 0x07, - 0xfd, 0x04, 0x00, 0x00, + // 442 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdf, 0x6b, 0xd4, 0x40, + 0x10, 0x6e, 0x12, 0x2d, 0xed, 0x44, 0xd3, 0x73, 0xac, 0x12, 0xa2, 0x68, 0x5c, 0x10, 0x8a, 0x0f, + 0x87, 0x5c, 0x5f, 0x44, 0x05, 0xa9, 0x50, 0xa4, 0x60, 0x3d, 0xd8, 0x70, 0x15, 0xf5, 0x69, 0x9b, + 0x8e, 0x72, 0x9a, 0xcb, 0xc5, 0xdd, 0xbd, 0xe2, 0xf9, 0xe0, 0x3f, 0xeb, 0xab, 0x7f, 0x84, 0xe4, + 0x77, 0xb2, 0x9c, 0xe6, 0xa5, 0x4f, 0xd9, 0x9d, 0xf9, 0xe6, 0xdb, 0x99, 0x6f, 0x3e, 0x02, 0xfb, + 0xf4, 0x43, 0x93, 0x4c, 0x45, 0xa2, 0x62, 0x91, 0x90, 0x1c, 0x67, 0x72, 0xa9, 0x97, 0xe8, 0xf5, + 0xa3, 0xec, 0xb7, 0x05, 0x7b, 0x51, 0x7e, 0xbc, 0x98, 0x9e, 0x7f, 0xa5, 0x58, 0x73, 0xfa, 0x8c, + 0x08, 0xd7, 0x52, 0xb1, 0x20, 0xdf, 0x0a, 0xad, 0x83, 0x5d, 0x5e, 0x9c, 0xf1, 0x3e, 0xec, 0xe6, + 0x5f, 0x95, 0x89, 0x98, 0x7c, 0xbb, 0x48, 0xb4, 0x01, 0xfc, 0x04, 0x5e, 0xc9, 0x77, 0x4a, 0x5a, + 0x5c, 0x08, 0x2d, 0x7c, 0x27, 0x74, 0x0e, 0xdc, 0xc9, 0xe1, 0xd8, 0x68, 0xc2, 0x78, 0xaa, 0xbc, + 0x37, 0x55, 0xc7, 0xa9, 0x96, 0x6b, 0x6e, 0x50, 0x05, 0x47, 0x70, 0x7b, 0x03, 0x0c, 0x47, 0xe0, + 0x7c, 0xa3, 0x75, 0xd5, 0x64, 0x7e, 0xc4, 0x7d, 0xb8, 0x7e, 0x29, 0x92, 0x55, 0xdd, 0x5f, 0x79, + 0x79, 0x6e, 0x3f, 0xb3, 0xd8, 0x13, 0x18, 0x9d, 0xa8, 0xa3, 0x58, 0xcf, 0x2f, 0x89, 0x93, 0xca, + 0x96, 0xa9, 0x22, 0xbc, 0x0b, 0xdb, 0x92, 0xd4, 0x2a, 0xd1, 0x05, 0xc5, 0x0e, 0xaf, 0x6e, 0x6c, + 0x06, 0x77, 0xde, 0x90, 0x3e, 0x25, 0x2d, 0xe7, 0x71, 0x94, 0x51, 0xdc, 0x14, 0xbc, 0x04, 0x77, + 0xd1, 0x44, 0x95, 0x6f, 0x15, 0x13, 0x06, 0xe6, 0x84, 0x9d, 0xc2, 0x2e, 0x9c, 0xbd, 0x05, 0x68, + 0x53, 0xf8, 0x00, 0xa0, 0x4c, 0xbe, 0x6b, 0x85, 0xee, 0x44, 0xf2, 0xbc, 0x16, 0xf2, 0x0b, 0xe9, + 0x68, 0xfe, 0xb3, 0x9c, 0xc7, 0xe1, 0x9d, 0x08, 0xfb, 0x05, 0xb7, 0x9a, 0x26, 0x15, 
0xa7, 0xef, + 0x2b, 0x52, 0x1a, 0x4f, 0x60, 0x4f, 0xf5, 0xf5, 0x2d, 0x98, 0xdd, 0xc9, 0xc3, 0x81, 0x35, 0x70, + 0xb3, 0xce, 0xe8, 0xcf, 0x36, 0xfb, 0x63, 0x33, 0xc0, 0xee, 0xfb, 0x95, 0x42, 0xaf, 0xe0, 0x46, + 0x89, 0x39, 0xcb, 0x95, 0xaf, 0x25, 0xba, 0xb7, 0x59, 0xa2, 0x02, 0xc3, 0x7b, 0x05, 0x6c, 0x0a, + 0x6e, 0x27, 0x39, 0xa8, 0x52, 0x58, 0x6f, 0xe4, 0xac, 0x59, 0xbb, 0xc3, 0xbb, 0xa1, 0xc9, 0x1f, + 0x1b, 0xbc, 0xe3, 0xea, 0xf5, 0xd2, 0x44, 0x38, 0x85, 0x9d, 0xda, 0x0b, 0x38, 0x24, 0x4c, 0x10, + 0x9a, 0x00, 0xd3, 0x46, 0x6c, 0x0b, 0xdf, 0x83, 0x17, 0x69, 0x49, 0x62, 0x71, 0xa5, 0xb4, 0x4f, + 0x2d, 0xfc, 0x00, 0x37, 0x7b, 0x4e, 0x1c, 0xe6, 0x7d, 0x6c, 0x02, 0x36, 0x3a, 0x99, 0x6d, 0xe1, + 0x0c, 0xa0, 0xdd, 0x1f, 0x3e, 0xfa, 0x67, 0x59, 0xed, 0xad, 0x80, 0xfd, 0x0f, 0x52, 0xd3, 0xbe, + 0xc6, 0x8f, 0xa3, 0xf1, 0x8b, 0x3e, 0xf0, 0x7c, 0xbb, 0xf8, 0xf1, 0x1c, 0xfe, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0xdc, 0xf1, 0x73, 0xce, 0x90, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -438,11 +394,10 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type ExternalScalerClient interface { - New(ctx context.Context, in *NewRequest, opts ...grpc.CallOption) (*empty.Empty, error) IsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*IsActiveResponse, error) + StreamIsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (ExternalScaler_StreamIsActiveClient, error) GetMetricSpec(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*GetMetricSpecResponse, error) GetMetrics(ctx context.Context, in *GetMetricsRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) - Close(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*empty.Empty, error) } type externalScalerClient struct { @@ -453,22 +408,45 @@ func NewExternalScalerClient(cc *grpc.ClientConn) ExternalScalerClient { return &externalScalerClient{cc} } -func (c *externalScalerClient) New(ctx context.Context, in *NewRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/externalscaler.ExternalScaler/New", in, out, opts...) +func (c *externalScalerClient) IsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*IsActiveResponse, error) { + out := new(IsActiveResponse) + err := c.cc.Invoke(ctx, "/externalscaler.ExternalScaler/IsActive", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *externalScalerClient) IsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*IsActiveResponse, error) { - out := new(IsActiveResponse) - err := c.cc.Invoke(ctx, "/externalscaler.ExternalScaler/IsActive", in, out, opts...) +func (c *externalScalerClient) StreamIsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (ExternalScaler_StreamIsActiveClient, error) { + stream, err := c.cc.NewStream(ctx, &_ExternalScaler_serviceDesc.Streams[0], "/externalscaler.ExternalScaler/StreamIsActive", opts...) 
if err != nil { return nil, err } - return out, nil + x := &externalScalerStreamIsActiveClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ExternalScaler_StreamIsActiveClient interface { + Recv() (*IsActiveResponse, error) + grpc.ClientStream +} + +type externalScalerStreamIsActiveClient struct { + grpc.ClientStream +} + +func (x *externalScalerStreamIsActiveClient) Recv() (*IsActiveResponse, error) { + m := new(IsActiveResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil } func (c *externalScalerClient) GetMetricSpec(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*GetMetricSpecResponse, error) { @@ -489,66 +467,35 @@ func (c *externalScalerClient) GetMetrics(ctx context.Context, in *GetMetricsReq return out, nil } -func (c *externalScalerClient) Close(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/externalscaler.ExternalScaler/Close", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - // ExternalScalerServer is the server API for ExternalScaler service. type ExternalScalerServer interface { - New(context.Context, *NewRequest) (*empty.Empty, error) IsActive(context.Context, *ScaledObjectRef) (*IsActiveResponse, error) + StreamIsActive(*ScaledObjectRef, ExternalScaler_StreamIsActiveServer) error GetMetricSpec(context.Context, *ScaledObjectRef) (*GetMetricSpecResponse, error) GetMetrics(context.Context, *GetMetricsRequest) (*GetMetricsResponse, error) - Close(context.Context, *ScaledObjectRef) (*empty.Empty, error) } // UnimplementedExternalScalerServer can be embedded to have forward compatible implementations. 
type UnimplementedExternalScalerServer struct { } -func (*UnimplementedExternalScalerServer) New(ctx context.Context, req *NewRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method New not implemented") -} func (*UnimplementedExternalScalerServer) IsActive(ctx context.Context, req *ScaledObjectRef) (*IsActiveResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method IsActive not implemented") } +func (*UnimplementedExternalScalerServer) StreamIsActive(req *ScaledObjectRef, srv ExternalScaler_StreamIsActiveServer) error { + return status.Errorf(codes.Unimplemented, "method StreamIsActive not implemented") +} func (*UnimplementedExternalScalerServer) GetMetricSpec(ctx context.Context, req *ScaledObjectRef) (*GetMetricSpecResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetMetricSpec not implemented") } func (*UnimplementedExternalScalerServer) GetMetrics(ctx context.Context, req *GetMetricsRequest) (*GetMetricsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetMetrics not implemented") } -func (*UnimplementedExternalScalerServer) Close(ctx context.Context, req *ScaledObjectRef) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Close not implemented") -} func RegisterExternalScalerServer(s *grpc.Server, srv ExternalScalerServer) { s.RegisterService(&_ExternalScaler_serviceDesc, srv) } -func _ExternalScaler_New_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NewRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExternalScalerServer).New(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/externalscaler.ExternalScaler/New", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExternalScalerServer).New(ctx, 
req.(*NewRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _ExternalScaler_IsActive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ScaledObjectRef) if err := dec(in); err != nil { @@ -567,6 +514,27 @@ func _ExternalScaler_IsActive_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _ExternalScaler_StreamIsActive_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ScaledObjectRef) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ExternalScalerServer).StreamIsActive(m, &externalScalerStreamIsActiveServer{stream}) +} + +type ExternalScaler_StreamIsActiveServer interface { + Send(*IsActiveResponse) error + grpc.ServerStream +} + +type externalScalerStreamIsActiveServer struct { + grpc.ServerStream +} + +func (x *externalScalerStreamIsActiveServer) Send(m *IsActiveResponse) error { + return x.ServerStream.SendMsg(m) +} + func _ExternalScaler_GetMetricSpec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ScaledObjectRef) if err := dec(in); err != nil { @@ -603,32 +571,10 @@ func _ExternalScaler_GetMetrics_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } -func _ExternalScaler_Close_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ScaledObjectRef) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExternalScalerServer).Close(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/externalscaler.ExternalScaler/Close", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExternalScalerServer).Close(ctx, 
req.(*ScaledObjectRef)) - } - return interceptor(ctx, in, info, handler) -} - var _ExternalScaler_serviceDesc = grpc.ServiceDesc{ ServiceName: "externalscaler.ExternalScaler", HandlerType: (*ExternalScalerServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "New", - Handler: _ExternalScaler_New_Handler, - }, { MethodName: "IsActive", Handler: _ExternalScaler_IsActive_Handler, @@ -641,11 +587,13 @@ var _ExternalScaler_serviceDesc = grpc.ServiceDesc{ MethodName: "GetMetrics", Handler: _ExternalScaler_GetMetrics_Handler, }, + }, + Streams: []grpc.StreamDesc{ { - MethodName: "Close", - Handler: _ExternalScaler_Close_Handler, + StreamName: "StreamIsActive", + Handler: _ExternalScaler_StreamIsActive_Handler, + ServerStreams: true, }, }, - Streams: []grpc.StreamDesc{}, Metadata: "externalscaler.proto", } diff --git a/pkg/scalers/externalscaler/externalscaler.proto b/pkg/scalers/externalscaler/externalscaler.proto index b72d2242c10..1df4495455a 100644 --- a/pkg/scalers/externalscaler/externalscaler.proto +++ b/pkg/scalers/externalscaler/externalscaler.proto @@ -1,25 +1,19 @@ syntax = "proto3"; package externalscaler; - -import "google/protobuf/empty.proto"; +option go_package = ".;externalscaler"; service ExternalScaler { - rpc New(NewRequest) returns (google.protobuf.Empty) {} rpc IsActive(ScaledObjectRef) returns (IsActiveResponse) {} + rpc StreamIsActive(ScaledObjectRef) returns (stream IsActiveResponse) {} rpc GetMetricSpec(ScaledObjectRef) returns (GetMetricSpecResponse) {} rpc GetMetrics(GetMetricsRequest) returns (GetMetricsResponse) {} - rpc Close(ScaledObjectRef) returns (google.protobuf.Empty) {} } message ScaledObjectRef { string name = 1; string namespace = 2; -} - -message NewRequest { - ScaledObjectRef scaledObjectRef = 1; - map metadata = 2; + map scalerMetadata = 3; } message IsActiveResponse { diff --git a/pkg/scalers/gcp_pub_sub_scaler.go b/pkg/scalers/gcp_pub_sub_scaler.go index a4f5aa6a518..b644e9b1b42 100644 --- 
a/pkg/scalers/gcp_pub_sub_scaler.go +++ b/pkg/scalers/gcp_pub_sub_scaler.go @@ -5,7 +5,7 @@ import ( "fmt" "strconv" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -97,23 +97,28 @@ func (s *pubsubScaler) Close() error { } // GetMetricSpecForScaling returns the metric spec for the HPA -func (s *pubsubScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *pubsubScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { // Construct the target subscription size as a quantity targetSubscriptionSizeQty := resource.NewQuantity(int64(s.metadata.targetSubscriptionSize), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{ - MetricName: pubSubSubscriptionSizeMetricName, - TargetAverageValue: targetSubscriptionSizeQty, + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: pubSubSubscriptionSizeMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetSubscriptionSizeQty, + }, } // Create the metric spec for the HPA - metricSpec := v2beta1.MetricSpec{ + metricSpec := v2beta2.MetricSpec{ External: externalMetric, Type: externalMetricType, } - return []v2beta1.MetricSpec{metricSpec} + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics connects to Stack Driver and finds the size of the pub sub subscription diff --git a/pkg/scalers/huawei_cloudeye_scaler.go b/pkg/scalers/huawei_cloudeye_scaler.go index 12642f66a27..c40015d3c4a 100644 --- a/pkg/scalers/huawei_cloudeye_scaler.go +++ b/pkg/scalers/huawei_cloudeye_scaler.go @@ -11,7 +11,7 @@ import ( "github.com/Huawei/gophercloud/auth/aksk" "github.com/Huawei/gophercloud/openstack" "github.com/Huawei/gophercloud/openstack/ces/v1/metricdata" - "k8s.io/api/autoscaling/v2beta1" + "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -238,14 +238,21 @@ func (h *huaweiCloudeyeScaler) GetMetrics(ctx context.Context, metricName string return append([]external_metrics.ExternalMetricValue{}, metric), nil } -func (h *huaweiCloudeyeScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (h *huaweiCloudeyeScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetMetricValue := resource.NewQuantity(int64(h.metadata.targetMetricValue), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: fmt.Sprintf("%s-%s-%s-%s", strings.ReplaceAll(h.metadata.namespace, ".", "-"), - h.metadata.metricsName, - h.metadata.dimensionName, h.metadata.dimensionValue), - TargetAverageValue: targetMetricValue} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: fmt.Sprintf("%s-%s-%s-%s", strings.ReplaceAll(h.metadata.namespace, ".", "-"), + h.metadata.metricsName, + h.metadata.dimensionName, h.metadata.dimensionValue), + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } func (h *huaweiCloudeyeScaler) IsActive(ctx context.Context) (bool, error) { diff --git a/pkg/scalers/kafka_scaler.go b/pkg/scalers/kafka_scaler.go index 678efedd11d..54f26617ff4 100644 --- a/pkg/scalers/kafka_scaler.go +++ b/pkg/scalers/kafka_scaler.go @@ -11,7 +11,7 @@ import ( "time" "github.com/Shopify/sarama" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -92,20 +92,12 @@ func NewKafkaScaler(resolvedEnv, metadata, 
authParams map[string]string) (Scaler func parseKafkaMetadata(resolvedEnv, metadata, authParams map[string]string) (kafkaMetadata, error) { meta := kafkaMetadata{} - // brokerList marked as deprecated, bootstrapServers is the new one to use - if metadata["brokerList"] != "" && metadata["bootstrapServers"] != "" { - return meta, errors.New("cannot specify both bootstrapServers and brokerList (deprecated)") - } - if metadata["brokerList"] == "" && metadata["bootstrapServers"] == "" { - return meta, errors.New("no bootstrapServers or brokerList (deprecated) given") + if metadata["bootstrapServers"] == "" { + return meta, errors.New("no bootstrapServers given") } if metadata["bootstrapServers"] != "" { meta.bootstrapServers = strings.Split(metadata["bootstrapServers"], ",") } - if metadata["brokerList"] != "" { - kafkaLog.V(0).Info("WARNING: usage of brokerList is deprecated. use bootstrapServers instead.") - meta.bootstrapServers = strings.Split(metadata["brokerList"], ",") - } if metadata["consumerGroup"] == "" { return meta, errors.New("no consumer group given") @@ -267,8 +259,11 @@ func getKafkaClients(metadata kafkaMetadata) (sarama.Client, sarama.ClusterAdmin return nil, nil, fmt.Errorf("error creating kafka client: %s", err) } - admin, err := sarama.NewClusterAdmin(metadata.bootstrapServers, config) + admin, err := sarama.NewClusterAdminFromClient(client) if err != nil { + if !client.Closed() { + client.Close() + } return nil, nil, fmt.Errorf("error creating kafka admin: %s", err) } @@ -335,11 +330,8 @@ func (s *kafkaScaler) getLagForPartition(partition int32, offsets *sarama.Offset // Close closes the kafka admin and client func (s *kafkaScaler) Close() error { - err := s.client.Close() - if err != nil { - return err - } - err = s.admin.Close() + // underlying client will also be closed on admin's Close() call + err := s.admin.Close() if err != nil { return err } @@ -347,16 +339,19 @@ func (s *kafkaScaler) Close() error { return nil } -func (s *kafkaScaler) 
GetMetricSpecForScaling() []v2beta1.MetricSpec { - return []v2beta1.MetricSpec{ - { - External: &v2beta1.ExternalMetricSource{ - MetricName: lagThresholdMetricName, - TargetAverageValue: resource.NewQuantity(s.metadata.lagThreshold, resource.DecimalSI), - }, - Type: kafkaMetricType, +func (s *kafkaScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { + targetMetricValue := resource.NewQuantity(s.metadata.lagThreshold, resource.DecimalSI) + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: lagThresholdMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, }, } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: kafkaMetricType} + return []v2beta2.MetricSpec{metricSpec} } //GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/kafka_scaler_test.go b/pkg/scalers/kafka_scaler_test.go index 3fb7b0728b7..41365920064 100644 --- a/pkg/scalers/kafka_scaler_test.go +++ b/pkg/scalers/kafka_scaler_test.go @@ -16,9 +16,9 @@ type parseKafkaMetadataTestData struct { // A complete valid metadata example for reference var validMetadata = map[string]string{ - "brokerList": "broker1:9092,broker2:9092", - "consumerGroup": "my-group", - "topic": "my-topic", + "bootstrapServers": "broker1:9092,broker2:9092", + "consumerGroup": "my-group", + "topic": "my-topic", } // A complete valid authParams example for sasl, with username and passwd @@ -32,22 +32,9 @@ var validWithAuthParams = map[string]string{ var validWithoutAuthParams = map[string]string{} var parseKafkaMetadataTestDataset = []parseKafkaMetadataTestData{ - // failure, no brokerList (deprecated) or bootstrapServers + // failure, no bootstrapServers {map[string]string{}, true, 0, nil, "", ""}, - // failure, both brokerList (deprecated) and bootstrapServers - {map[string]string{"brokerList": "foobar:9092", "bootstrapServers": 
"foobar:9092"}, true, 0, nil, "", ""}, - // tests with brokerList (deprecated) - // failure, no consumer group - {map[string]string{"brokerList": "foobar:9092"}, true, 1, []string{"foobar:9092"}, "", ""}, - // failure, no topic - {map[string]string{"brokerList": "foobar:9092", "consumerGroup": "my-group"}, true, 1, []string{"foobar:9092"}, "my-group", ""}, - // success - {map[string]string{"brokerList": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topic"}, false, 1, []string{"foobar:9092"}, "my-group", "my-topic"}, - // success, more brokers - {map[string]string{"brokerList": "foo:9092,bar:9092", "consumerGroup": "my-group", "topic": "my-topic"}, false, 2, []string{"foo:9092", "bar:9092"}, "my-group", "my-topic"}, - - // tests with bootstrapServers // failure, no consumer group {map[string]string{"bootstrapServers": "foobar:9092"}, true, 1, []string{"foobar:9092"}, "", ""}, // failure, no topic diff --git a/pkg/scalers/liiklus_scaler.go b/pkg/scalers/liiklus_scaler.go index 2c37589f986..a18f67e0771 100644 --- a/pkg/scalers/liiklus_scaler.go +++ b/pkg/scalers/liiklus_scaler.go @@ -9,7 +9,7 @@ import ( liiklus_service "github.com/kedacore/keda/pkg/scalers/liiklus" "github.com/pkg/errors" "google.golang.org/grpc" - "k8s.io/api/autoscaling/v2beta1" + "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -77,16 +77,19 @@ func (s *liiklusScaler) GetMetrics(ctx context.Context, metricName string, metri } -func (s *liiklusScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { - return []v2beta1.MetricSpec{ - { - External: &v2beta1.ExternalMetricSource{ - MetricName: liiklusLagThresholdMetricName, - TargetAverageValue: resource.NewQuantity(s.metadata.lagThreshold, resource.DecimalSI), - }, - Type: liiklusMetricType, +func (s *liiklusScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { + targetMetricValue := resource.NewQuantity(s.metadata.lagThreshold, 
resource.DecimalSI) + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: liiklusLagThresholdMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, }, } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: liiklusMetricType} + return []v2beta2.MetricSpec{metricSpec} } func (s *liiklusScaler) Close() error { diff --git a/pkg/scalers/mysql_scaler.go b/pkg/scalers/mysql_scaler.go index 2c000e0caf8..9b42d6ed3bc 100644 --- a/pkg/scalers/mysql_scaler.go +++ b/pkg/scalers/mysql_scaler.go @@ -5,7 +5,7 @@ import ( "database/sql" "fmt" "github.com/go-sql-driver/mysql" - "k8s.io/api/autoscaling/v2beta1" + "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -183,16 +183,21 @@ func (s *mySQLScaler) getQueryResult() (int, error) { } // GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler -func (s *mySQLScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *mySQLScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetQueryValue := resource.NewQuantity(int64(s.metadata.queryValue), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{ - MetricName: mySQLMetricName, - TargetAverageValue: targetQueryValue, + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: mySQLMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetQueryValue, + }, } - metricSpec := v2beta1.MetricSpec{ + metricSpec := v2beta2.MetricSpec{ External: externalMetric, Type: externalMetricType, } - return []v2beta1.MetricSpec{metricSpec} + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/postgresql_scaler.go 
b/pkg/scalers/postgresql_scaler.go index ae3f580eae2..808db29c1cd 100644 --- a/pkg/scalers/postgresql_scaler.go +++ b/pkg/scalers/postgresql_scaler.go @@ -5,7 +5,7 @@ import ( "database/sql" "fmt" _ "github.com/lib/pq" - "k8s.io/api/autoscaling/v2beta1" + "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -182,16 +182,21 @@ func (s *postgreSQLScaler) getActiveNumber() (int, error) { } // GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler -func (s *postgreSQLScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *postgreSQLScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetQueryValue := resource.NewQuantity(int64(s.metadata.targetQueryValue), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{ - MetricName: pgMetricName, - TargetAverageValue: targetQueryValue, + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: pgMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetQueryValue, + }, } - metricSpec := v2beta1.MetricSpec{ + metricSpec := v2beta2.MetricSpec{ External: externalMetric, Type: externalMetricType, } - return []v2beta1.MetricSpec{metricSpec} + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/prometheus.go b/pkg/scalers/prometheus_scaler.go similarity index 87% rename from pkg/scalers/prometheus.go rename to pkg/scalers/prometheus_scaler.go index 691098e4f1e..f1691bc90fe 100644 --- a/pkg/scalers/prometheus.go +++ b/pkg/scalers/prometheus_scaler.go @@ -10,7 +10,7 @@ import ( "strconv" "time" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/labels" @@ -109,16 +109,21 @@ func (s *prometheusScaler) Close() error { return nil } -func (s *prometheusScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { - return []v2beta1.MetricSpec{ - { - External: &v2beta1.ExternalMetricSource{ - TargetAverageValue: resource.NewQuantity(int64(s.metadata.threshold), resource.DecimalSI), - MetricName: s.metadata.metricName, - }, - Type: externalMetricType, +func (s *prometheusScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { + targetMetricValue := resource.NewQuantity(int64(s.metadata.threshold), resource.DecimalSI) + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: s.metadata.metricName, }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, + }, + } + metricSpec := v2beta2.MetricSpec{ + External: externalMetric, Type: externalMetricType, } + return []v2beta2.MetricSpec{metricSpec} } func (s *prometheusScaler) ExecutePromQuery() (float64, error) { diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go index 44bf495bf17..beee26f5fea 100644 --- a/pkg/scalers/rabbitmq_scaler.go +++ b/pkg/scalers/rabbitmq_scaler.go @@ -11,7 +11,7 @@ import ( "time" "github.com/streadway/amqp" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -233,16 +233,21 @@ func (s *rabbitMQScaler) getQueueInfoViaHttp() (*queueInfo, error) { } // GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler -func (s *rabbitMQScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { - return []v2beta1.MetricSpec{ - { - External: &v2beta1.ExternalMetricSource{ - MetricName: rabbitQueueLengthMetricName, - TargetAverageValue: resource.NewQuantity(int64(s.metadata.queueLength), resource.DecimalSI), - }, - Type: rabbitMetricType, +func (s 
*rabbitMQScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { + targetMetricValue := resource.NewQuantity(int64(s.metadata.queueLength), resource.DecimalSI) + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: rabbitQueueLengthMetricName, }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, + }, + } + metricSpec := v2beta2.MetricSpec{ + External: externalMetric, Type: rabbitMetricType, } + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scalers/redis_scaler.go b/pkg/scalers/redis_scaler.go index 5324bd7b345..c803ca346af 100644 --- a/pkg/scalers/redis_scaler.go +++ b/pkg/scalers/redis_scaler.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/go-redis/redis" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -157,11 +157,21 @@ func (s *redisScaler) Close() error { } // GetMetricSpecForScaling returns the metric spec for the HPA -func (s *redisScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { +func (s *redisScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetListLengthQty := resource.NewQuantity(int64(s.metadata.targetListLength), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: listLengthMetricName, TargetAverageValue: targetListLengthQty} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: listLengthMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetListLengthQty, + }, + } + metricSpec := v2beta2.MetricSpec{ + External: 
externalMetric, Type: externalMetricType, + } + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics connects to Redis and finds the length of the list diff --git a/pkg/scalers/redis_streams_scaler.go b/pkg/scalers/redis_streams_scaler.go index 8eb0e071d47..7eda4156d85 100644 --- a/pkg/scalers/redis_streams_scaler.go +++ b/pkg/scalers/redis_streams_scaler.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/go-redis/redis" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -210,12 +210,19 @@ func (s *redisStreamsScaler) Close() error { } // GetMetricSpecForScaling returns the metric spec for the HPA -func (s *redisStreamsScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { - +func (s *redisStreamsScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { targetPendingEntriesCount := resource.NewQuantity(int64(s.metadata.targetPendingEntriesCount), resource.DecimalSI) - externalMetric := &v2beta1.ExternalMetricSource{MetricName: pendingEntriesCountMetricName, TargetAverageValue: targetPendingEntriesCount} - metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType} - return []v2beta1.MetricSpec{metricSpec} + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: pendingEntriesCountMetricName, + }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetPendingEntriesCount, + }, + } + metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2beta2.MetricSpec{metricSpec} } // GetMetrics fetches the number of pending entries for a consumer group in a stream diff --git a/pkg/scalers/scaler.go b/pkg/scalers/scaler.go index 07711d0a2c7..0357dc41f17 100644 --- a/pkg/scalers/scaler.go +++ b/pkg/scalers/scaler.go @@ -3,7 +3,7 @@ package scalers import ( "context" - v2beta1 
"k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" ) @@ -13,12 +13,19 @@ type Scaler interface { // The scaler returns the metric values for a metric Name and criteria matching the selector GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) - //returns the metrics based on which this scaler determines that the deployment scales. This is used to contruct the HPA spec that is created for + // Returns the metrics based on which this scaler determines that the ScaleTarget scales. This is used to contruct the HPA spec that is created for // this scaled object. The labels used should match the selectors used in GetMetrics - GetMetricSpecForScaling() []v2beta1.MetricSpec + GetMetricSpecForScaling() []v2beta2.MetricSpec IsActive(ctx context.Context) (bool, error) // Close any resources that need disposing when scaler is no longer used or destroyed Close() error } + +type PushScaler interface { + Scaler + + // Run is the only writer to the active channel and must close it once done. 
+ Run(ctx context.Context, active chan<- bool) +} diff --git a/pkg/scalers/stan_scaler.go b/pkg/scalers/stan_scaler.go index d8c983b9439..91014464823 100644 --- a/pkg/scalers/stan_scaler.go +++ b/pkg/scalers/stan_scaler.go @@ -8,7 +8,7 @@ import ( "net/http" "strconv" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -179,16 +179,21 @@ func (s *stanScaler) hasPendingMessage() bool { return false } -func (s *stanScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec { - return []v2beta1.MetricSpec{ - { - External: &v2beta1.ExternalMetricSource{ - MetricName: lagThresholdMetricName, - TargetAverageValue: resource.NewQuantity(s.metadata.lagThreshold, resource.DecimalSI), - }, - Type: stanMetricType, +func (s *stanScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { + targetMetricValue := resource.NewQuantity(s.metadata.lagThreshold, resource.DecimalSI) + externalMetric := &v2beta2.ExternalMetricSource{ + Metric: v2beta2.MetricIdentifier{ + Name: lagThresholdMetricName, }, + Target: v2beta2.MetricTarget{ + Type: v2beta2.AverageValueMetricType, + AverageValue: targetMetricValue, + }, + } + metricSpec := v2beta2.MetricSpec{ + External: externalMetric, Type: stanMetricType, } + return []v2beta2.MetricSpec{metricSpec} } //GetMetrics returns value for a supported metric and an error if there is a problem getting the metric diff --git a/pkg/scaling/executor/scale_executor.go b/pkg/scaling/executor/scale_executor.go new file mode 100644 index 00000000000..8e88b3da9ce --- /dev/null +++ b/pkg/scaling/executor/scale_executor.go @@ -0,0 +1,88 @@ +package executor + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/scale" + 
"sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +const ( + // Default cooldown period for a ScaleTarget if no cooldownPeriod is defined on the scaledObject + defaultCooldownPeriod = 5 * 60 // 5 minutes +) + +type ScaleExecutor interface { + RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, scaleTo int64, maxScale int64) + RequestScale(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, isActive bool) +} + +type scaleExecutor struct { + client client.Client + scaleClient *scale.ScalesGetter + reconcilerScheme *runtime.Scheme + logger logr.Logger +} + +func NewScaleExecutor(client client.Client, scaleClient *scale.ScalesGetter, reconcilerScheme *runtime.Scheme) ScaleExecutor { + return &scaleExecutor{ + client: client, + scaleClient: scaleClient, + reconcilerScheme: reconcilerScheme, + logger: logf.Log.WithName("scaleexecutor"), + } +} + +func (e *scaleExecutor) updateLastActiveTime(ctx context.Context, logger logr.Logger, object interface{}) error { + var patch client.Patch + + now := metav1.Now() + runtimeObj := object.(runtime.Object) + switch obj := runtimeObj.(type) { + case *kedav1alpha1.ScaledObject: + patch = client.MergeFrom(obj.DeepCopy()) + obj.Status.LastActiveTime = &now + case *kedav1alpha1.ScaledJob: + patch = client.MergeFrom(obj.DeepCopy()) + obj.Status.LastActiveTime = &now + default: + err := fmt.Errorf("Unknown scalable object type %v", obj) + logger.Error(err, "Failed to patch Objects Status") + return err + } + + err := e.client.Status().Patch(ctx, runtimeObj, patch) + if err != nil { + logger.Error(err, "Failed to patch Objects Status") + } + return err +} + +func (e *scaleExecutor) setActiveCondition(ctx context.Context, logger logr.Logger, object interface{}, status metav1.ConditionStatus, reason string, mesage string) error { + var patch client.Patch + + runtimeObj := object.(runtime.Object) + switch obj := runtimeObj.(type) { + case 
*kedav1alpha1.ScaledObject: + patch = client.MergeFrom(obj.DeepCopy()) + obj.Status.Conditions.SetActiveCondition(status, reason, mesage) + case *kedav1alpha1.ScaledJob: + patch = client.MergeFrom(obj.DeepCopy()) + obj.Status.Conditions.SetActiveCondition(status, reason, mesage) + default: + err := fmt.Errorf("Unknown scalable object type %v", obj) + logger.Error(err, "Failed to patch Objects Status") + return err + } + + err := e.client.Status().Patch(ctx, runtimeObj, patch) + if err != nil { + logger.Error(err, "Failed to patch Objects Status") + } + return err +} diff --git a/pkg/scaling/executor/scale_jobs.go b/pkg/scaling/executor/scale_jobs.go new file mode 100644 index 00000000000..4ce60706a80 --- /dev/null +++ b/pkg/scaling/executor/scale_jobs.go @@ -0,0 +1,128 @@ +package executor + +import ( + "context" + //"fmt" + + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + version "github.com/kedacore/keda/version" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, scaleTo int64, maxScale int64) { + runningJobCount := e.getRunningJobCount(scaledJob, maxScale) + e.logger.Info("Scaling Jobs", "Number of running Jobs ", runningJobCount) + + var effectiveMaxScale int64 + effectiveMaxScale = maxScale - runningJobCount + if effectiveMaxScale < 0 { + effectiveMaxScale = 0 + } + + e.logger.Info("Scaling Jobs") + + if isActive { + e.logger.V(1).Info("At least one scaler is active") + now := metav1.Now() + scaledJob.Status.LastActiveTime = &now + e.updateLastActiveTime(ctx, e.logger, scaledJob) + e.createJobs(scaledJob, scaleTo, effectiveMaxScale) + + } else { + e.logger.V(1).Info("No change in activity") + } + return +} + +func (e *scaleExecutor) 
createJobs(scaledJob *kedav1alpha1.ScaledJob, scaleTo int64, maxScale int64) { + // scaledObject.Spec.JobTargetRef.Template.GenerateName = scaledObject.GetName() + "-" + // if scaledObject.Spec.JobTargetRef.Template.Labels == nil { + // scaledObject.Spec.JobTargetRef.Template.Labels = map[string]string{} + // } + // scaledObject.Spec.JobTargetRef.Template.Labels["scaledobject"] = scaledObject.GetName() + + e.logger.Info("Creating jobs", "Effective number of max jobs", maxScale) + + if scaleTo > maxScale { + scaleTo = maxScale + } + e.logger.Info("Creating jobs", "Number of jobs", scaleTo) + + for i := 0; i < int(scaleTo); i++ { + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: scaledJob.GetName() + "-", + Namespace: scaledJob.GetNamespace(), + Labels: map[string]string{ + "app.kubernetes.io/name": scaledJob.GetName(), + "app.kubernetes.io/version": version.Version, + "app.kubernetes.io/part-of": scaledJob.GetName(), + "app.kubernetes.io/managed-by": "keda-operator", + "scaledobject": scaledJob.GetName(), + }, + }, + //Spec: *scaledObject.Spec.JobTargetRef.DeepCopy(), + } + + // Job doesn't allow RestartPolicyAlways, it seems like this value is set by the client as a default one, + // we should set this property to allowed value in that case + if job.Spec.Template.Spec.RestartPolicy == "" { + e.logger.V(1).Info("Job RestartPolicy is not set, setting it to 'OnFailure', to avoid setting it to the client's default value 'Always'") + job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure + } + + // Set ScaledObject instance as the owner and controller + err := controllerutil.SetControllerReference(scaledJob, job, e.reconcilerScheme) + if err != nil { + e.logger.Error(err, "Failed to set ScaledObject as the owner of the new Job") + } + + err = e.client.Create(context.TODO(), job) + if err != nil { + e.logger.Error(err, "Failed to create a new Job") + + } + } + e.logger.Info("Created jobs", "Number of jobs", scaleTo) + +} + +func (e 
*scaleExecutor) isJobFinished(j *batchv1.Job) bool { + for _, c := range j.Status.Conditions { + if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue { + return true + } + } + return false +} + +func (e *scaleExecutor) getRunningJobCount(scaledJob *kedav1alpha1.ScaledJob, maxScale int64) int64 { + var runningJobs int64 + + opts := []client.ListOption{ + client.InNamespace(scaledJob.GetNamespace()), + client.MatchingLabels(map[string]string{"scaledjob": scaledJob.GetName()}), + } + + jobs := &batchv1.JobList{} + err := e.client.List(context.TODO(), jobs, opts...) + + if err != nil { + return 0 + } + + for _, job := range jobs.Items { + if !e.isJobFinished(&job) { + runningJobs++ + } + } + + return runningJobs +} diff --git a/pkg/scaling/executor/scale_scaledobjects.go b/pkg/scaling/executor/scale_scaledobjects.go new file mode 100644 index 00000000000..edff334227b --- /dev/null +++ b/pkg/scaling/executor/scale_scaledobjects.go @@ -0,0 +1,130 @@ +package executor + +import ( + "context" + "time" + + "github.com/go-logr/logr" + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (e *scaleExecutor) RequestScale(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, isActive bool) { + logger := e.logger.WithValues("Scaledobject.Name", scaledObject.Name, + "ScaledObject.Namespace", scaledObject.Namespace, + "ScaleTarget.Name", scaledObject.Spec.ScaleTargetRef.Name) + + currentScale, err := e.getScaleTargetScale(ctx, scaledObject) + if err != nil { + logger.Error(err, "Error getting Scale") + } + + if currentScale.Spec.Replicas == 0 && isActive { + // current replica count is 0, but there is an active trigger. 
+ // scale the ScaleTarget up + e.scaleFromZero(ctx, logger, scaledObject, currentScale) + } else if !isActive && + currentScale.Spec.Replicas > 0 && + (scaledObject.Spec.MinReplicaCount == nil || *scaledObject.Spec.MinReplicaCount == 0) { + // there are no active triggers, but the ScaleTarget has replicas. + // AND + // There is no minimum configured or minimum is set to ZERO. HPA will handles other scale down operations + + // Try to scale it down. + e.scaleToZero(ctx, logger, scaledObject, currentScale) + } else if !isActive && + scaledObject.Spec.MinReplicaCount != nil && + currentScale.Spec.Replicas < *scaledObject.Spec.MinReplicaCount { + // there are no active triggers + // AND + // ScaleTarget replicas count is less than minimum replica count specified in ScaledObject + // Let's set ScaleTarget replicas count to correct value + currentScale.Spec.Replicas = *scaledObject.Spec.MinReplicaCount + + err := e.updateScaleOnScaleTarget(ctx, scaledObject, currentScale) + if err == nil { + logger.Info("Successfully set ScaleTarget replicas count to ScaledObject minReplicaCount", + "ScaleTarget.Replicas", currentScale.Spec.Replicas) + } + } else if isActive { + // triggers are active, but we didn't need to scale (replica count > 0) + // Update LastActiveTime to now. 
+ e.updateLastActiveTime(ctx, logger, scaledObject) + } else { + logger.V(1).Info("ScaleTarget no change") + } + + condition := scaledObject.Status.Conditions.GetActiveCondition() + if condition.IsUnknown() || condition.IsTrue() != isActive { + if isActive { + e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionTrue, "ScalerActive", "Scaling is performed because triggers are active") + } else { + e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerNotActive", "Scaling is not performed because triggers are not active") + } + } + +} + +// An object will be scaled down to 0 only if it's passed its cooldown period +// or if LastActiveTime is nil +func (e *scaleExecutor) scaleToZero(ctx context.Context, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, scale *autoscalingv1.Scale) { + var cooldownPeriod time.Duration + + if scaledObject.Spec.CooldownPeriod != nil { + cooldownPeriod = time.Second * time.Duration(*scaledObject.Spec.CooldownPeriod) + } else { + cooldownPeriod = time.Second * time.Duration(defaultCooldownPeriod) + } + + // LastActiveTime can be nil if the ScaleTarget was scaled outside of Keda. + // In this case we will ignore the cooldown period and scale it down + if scaledObject.Status.LastActiveTime == nil || + scaledObject.Status.LastActiveTime.Add(cooldownPeriod).Before(time.Now()) { + // or last time a trigger was active was > cooldown period, so scale down. 
+ scale.Spec.Replicas = 0 + err := e.updateScaleOnScaleTarget(ctx, scaledObject, scale) + if err == nil { + logger.Info("Successfully scaled ScaleTarget to 0 replicas") + e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerNotActive", "Scaling is not performed because triggers are not active") + } + } else { + logger.V(1).Info("ScaleTarget cooling down", + "LastActiveTime", scaledObject.Status.LastActiveTime, + "CoolDownPeriod", cooldownPeriod) + + activeCondition := scaledObject.Status.Conditions.GetActiveCondition() + if !activeCondition.IsFalse() || activeCondition.Reason != "ScalerCooldown" { + e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerCooldown", "Scaler cooling down because triggers are not active") + } + } +} + +func (e *scaleExecutor) scaleFromZero(ctx context.Context, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, scale *autoscalingv1.Scale) { + currentReplicas := scale.Spec.Replicas + if scaledObject.Spec.MinReplicaCount != nil && *scaledObject.Spec.MinReplicaCount > 0 { + scale.Spec.Replicas = *scaledObject.Spec.MinReplicaCount + } else { + scale.Spec.Replicas = 1 + } + + err := e.updateScaleOnScaleTarget(ctx, scaledObject, scale) + + if err == nil { + logger.Info("Successfully updated ScaleTarget", + "Original Replicas Count", currentReplicas, + "New Replicas Count", scale.Spec.Replicas) + + // Scale was successful. 
Update lastScaleTime and lastActiveTime on the scaledObject + e.updateLastActiveTime(ctx, logger, scaledObject) + } +} + +func (e *scaleExecutor) getScaleTargetScale(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject) (*autoscalingv1.Scale, error) { + return (*e.scaleClient).Scales(scaledObject.Namespace).Get(ctx, scaledObject.Status.ScaleTargetGVKR.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) +} + +func (e *scaleExecutor) updateScaleOnScaleTarget(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, scale *autoscalingv1.Scale) error { + _, err := (*e.scaleClient).Scales(scaledObject.Namespace).Update(ctx, scaledObject.Status.ScaleTargetGVKR.GroupResource(), scale, metav1.UpdateOptions{}) + return err +} diff --git a/pkg/handler/hashicorpvault_handler.go b/pkg/scaling/resolver/hashicorpvault_handler.go similarity index 99% rename from pkg/handler/hashicorpvault_handler.go rename to pkg/scaling/resolver/hashicorpvault_handler.go index be73947ff1e..5cec579bb2b 100644 --- a/pkg/handler/hashicorpvault_handler.go +++ b/pkg/scaling/resolver/hashicorpvault_handler.go @@ -1,4 +1,4 @@ -package handler +package resolver import ( "errors" diff --git a/pkg/scaling/resolver/scale_resolvers.go b/pkg/scaling/resolver/scale_resolvers.go new file mode 100644 index 00000000000..c7e0b426e02 --- /dev/null +++ b/pkg/scaling/resolver/scale_resolvers.go @@ -0,0 +1,243 @@ +package resolver + +import ( + "context" + "fmt" + + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func ResolveContainerEnv(client client.Client, logger logr.Logger, podSpec *corev1.PodSpec, containerName, namespace string) (map[string]string, error) { + + if len(podSpec.Containers) < 1 { + return nil, fmt.Errorf("Target object doesn't have containers") + } + + var container corev1.Container + if containerName 
!= "" { + for _, c := range podSpec.Containers { + if c.Name == containerName { + container = c + break + } + } + + if &container == nil { + return nil, fmt.Errorf("Couldn't find container with name %s on Target object", containerName) + } + } else { + container = podSpec.Containers[0] + } + + return resolveEnv(client, logger, &container, namespace) +} + +func ResolveAuthRef(client client.Client, logger logr.Logger, triggerAuthRef *kedav1alpha1.ScaledObjectAuthRef, podSpec *corev1.PodSpec, namespace string) (map[string]string, string) { + result := make(map[string]string) + podIdentity := "" + + if triggerAuthRef != nil && triggerAuthRef.Name != "" { + triggerAuth := &kedav1alpha1.TriggerAuthentication{} + err := client.Get(context.TODO(), types.NamespacedName{Name: triggerAuthRef.Name, Namespace: namespace}, triggerAuth) + if err != nil { + logger.Error(err, "Error getting triggerAuth", "triggerAuthRef.Name", triggerAuthRef.Name) + } else { + podIdentity = string(triggerAuth.Spec.PodIdentity.Provider) + if triggerAuth.Spec.Env != nil { + for _, e := range triggerAuth.Spec.Env { + env, err := ResolveContainerEnv(client, logger, podSpec, e.ContainerName, namespace) + if err != nil { + result[e.Parameter] = "" + } else { + result[e.Parameter] = env[e.Name] + } + } + } + if triggerAuth.Spec.SecretTargetRef != nil { + for _, e := range triggerAuth.Spec.SecretTargetRef { + result[e.Parameter] = resolveAuthSecret(client, logger, e.Name, namespace, e.Key) + } + } + if triggerAuth.Spec.HashiCorpVault.Secrets != nil { + vault := NewHashicorpVaultHandler(&triggerAuth.Spec.HashiCorpVault) + err := vault.Initialize(logger) + if err != nil { + logger.Error(err, "Error authenticate to Vault", "triggerAuthRef.Name", triggerAuthRef.Name) + } else { + for _, e := range triggerAuth.Spec.HashiCorpVault.Secrets { + secret, err := vault.Read(e.Path) + if err != nil { + logger.Error(err, "Error trying to read secret from Vault", "triggerAuthRef.Name", triggerAuthRef.Name, + 
"secret.path", e.Path) + continue + } + + result[e.Parameter] = resolveVaultSecret(logger, secret.Data, e.Key) + } + + vault.Stop() + } + } + } + } + + return result, podIdentity +} + +func resolveEnv(client client.Client, logger logr.Logger, container *corev1.Container, namespace string) (map[string]string, error) { + resolved := make(map[string]string) + + if container.EnvFrom != nil { + for _, source := range container.EnvFrom { + if source.ConfigMapRef != nil { + if configMap, err := resolveConfigMap(client, source.ConfigMapRef, namespace); err == nil { + for k, v := range configMap { + resolved[k] = v + } + } else if source.ConfigMapRef.Optional != nil && *source.ConfigMapRef.Optional { + // ignore error when ConfigMap is marked as optional + continue + } else { + return nil, fmt.Errorf("error reading config ref %s on namespace %s/: %s", source.ConfigMapRef, namespace, err) + } + } else if source.SecretRef != nil { + if secretsMap, err := resolveSecretMap(client, source.SecretRef, namespace); err == nil { + for k, v := range secretsMap { + resolved[k] = v + } + } else if source.SecretRef.Optional != nil && *source.SecretRef.Optional { + // ignore error when Secret is marked as optional + continue + } else { + return nil, fmt.Errorf("error reading secret ref %s on namespace %s: %s", source.SecretRef, namespace, err) + } + } + } + + } + + if container.Env != nil { + for _, envVar := range container.Env { + var value string + var err error + + // env is either a name/value pair or an EnvVarSource + if envVar.Value != "" { + value = envVar.Value + } else if envVar.ValueFrom != nil { + // env is an EnvVarSource, that can be on of the 4 below + if envVar.ValueFrom.SecretKeyRef != nil { + // env is a secret selector + value, err = resolveSecretValue(client, envVar.ValueFrom.SecretKeyRef, envVar.ValueFrom.SecretKeyRef.Key, namespace) + if err != nil { + return nil, fmt.Errorf("error resolving secret name %s for env %s in namespace %s", + envVar.ValueFrom.SecretKeyRef, 
+ envVar.Name, + namespace) + } + } else if envVar.ValueFrom.ConfigMapKeyRef != nil { + // env is a configMap selector + value, err = resolveConfigValue(client, envVar.ValueFrom.ConfigMapKeyRef, envVar.ValueFrom.ConfigMapKeyRef.Key, namespace) + if err != nil { + return nil, fmt.Errorf("error resolving config %s for env %s in namespace %s", + envVar.ValueFrom.ConfigMapKeyRef, + envVar.Name, + namespace) + } + } else { + logger.V(1).Info("cannot resolve env %s to a value. fieldRef and resourceFieldRef env are skipped", envVar.Name) + continue + } + + } + resolved[envVar.Name] = value + } + + } + + return resolved, nil +} + +func resolveConfigMap(client client.Client, configMapRef *corev1.ConfigMapEnvSource, namespace string) (map[string]string, error) { + configMap := &corev1.ConfigMap{} + err := client.Get(context.TODO(), types.NamespacedName{Name: configMapRef.Name, Namespace: namespace}, configMap) + if err != nil { + return nil, err + } + return configMap.Data, nil +} + +func resolveSecretMap(client client.Client, secretMapRef *corev1.SecretEnvSource, namespace string) (map[string]string, error) { + secret := &corev1.Secret{} + err := client.Get(context.TODO(), types.NamespacedName{Name: secretMapRef.Name, Namespace: namespace}, secret) + if err != nil { + return nil, err + } + + secretsStr := make(map[string]string) + for k, v := range secret.Data { + secretsStr[k] = string(v) + } + return secretsStr, nil +} + +func resolveSecretValue(client client.Client, secretKeyRef *corev1.SecretKeySelector, keyName, namespace string) (string, error) { + secret := &corev1.Secret{} + err := client.Get(context.TODO(), types.NamespacedName{Name: secretKeyRef.Name, Namespace: namespace}, secret) + if err != nil { + return "", err + } + return string(secret.Data[keyName]), nil + +} + +func resolveConfigValue(client client.Client, configKeyRef *corev1.ConfigMapKeySelector, keyName, namespace string) (string, error) { + configMap := &corev1.ConfigMap{} + err := 
client.Get(context.TODO(), types.NamespacedName{Name: configKeyRef.Name, Namespace: namespace}, configMap) + if err != nil { + return "", err + } + return string(configMap.Data[keyName]), nil +} + +func resolveAuthSecret(client client.Client, logger logr.Logger, name, namespace, key string) string { + if name == "" || namespace == "" || key == "" { + logger.Error(fmt.Errorf("error trying to get secret"), "name, namespace and key are required", "Secret.Namespace", namespace, "Secret.Name", name, "key", key) + return "" + } + + secret := &corev1.Secret{} + err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, secret) + if err != nil { + logger.Error(err, "Error trying to get secret from namespace", "Secret.Namespace", namespace, "Secret.Name", name) + return "" + } + result := secret.Data[key] + + if result == nil { + return "" + } + + return string(result) +} + +func resolveVaultSecret(logger logr.Logger, data map[string]interface{}, key string) string { + if v2Data, ok := data["data"].(map[string]interface{}); ok { + if value, ok := v2Data[key]; ok { + if s, ok := value.(string); ok { + return s + } + } else { + logger.Error(fmt.Errorf("key '%s' not found", key), "Error trying to get key from Vault secret") + return "" + } + } + + logger.Error(fmt.Errorf("unable to convert Vault Data value"), "Error trying to convert Data secret vaule") + return "" +} diff --git a/pkg/handler/scale_handler_test.go b/pkg/scaling/resolver/scale_resolvers_test.go similarity index 93% rename from pkg/handler/scale_handler_test.go rename to pkg/scaling/resolver/scale_resolvers_test.go index 20e08103629..b52dc75ebc4 100644 --- a/pkg/handler/scale_handler_test.go +++ b/pkg/scaling/resolver/scale_resolvers_test.go @@ -1,11 +1,11 @@ -package handler +package resolver import ( "testing" corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/fake" + logf "sigs.k8s.io/controller-runtime/pkg/log" ) var ( 
@@ -108,9 +108,7 @@ var testMetadatas = []testMetadata{ func TestResolveNonExistingConfigMapsOrSecretsEnv(t *testing.T) { for _, testData := range testMetadatas { - testScaleHandler := NewScaleHandler(fake.NewFakeClient(), scheme.Scheme) - - _, err := testScaleHandler.resolveEnv(testData.container, namespace) + _, err := resolveEnv(fake.NewFakeClient(), logf.Log.WithName("test"), testData.container, namespace) if err != nil && !testData.isError { t.Errorf("Expected success because %s got error, %s", testData.comment, err) diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go new file mode 100644 index 00000000000..b47fe9a77f9 --- /dev/null +++ b/pkg/scaling/scale_handler.go @@ -0,0 +1,420 @@ +package scaling + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/kedacore/keda/pkg/apis/duck" + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + "github.com/kedacore/keda/pkg/scalers" + "github.com/kedacore/keda/pkg/scaling/executor" + "github.com/kedacore/keda/pkg/scaling/resolver" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/scale" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +const ( + // Default polling interval for a ScaledObject triggers if no pollingInterval is defined. 
+ defaultPollingInterval = 30 +) + +// ScaleHandler encapsulates the logic of calling the right scalers for +// each ScaledObject and making the final scale decision and operation +type ScaleHandler interface { + HandleScalableObject(scalableObject interface{}) error + DeleteScalableObject(scalableObject interface{}) error + GetScalers(scalableObject interface{}) ([]scalers.Scaler, error) +} + +type scaleHandler struct { + client client.Client + logger logr.Logger + scaleLoopContexts *sync.Map + scaleExecutor executor.ScaleExecutor +} + +// NewScaleHandler creates a ScaleHandler object +func NewScaleHandler(client client.Client, scaleClient *scale.ScalesGetter, reconcilerScheme *runtime.Scheme) ScaleHandler { + return &scaleHandler{ + client: client, + logger: logf.Log.WithName("scalehandler"), + scaleLoopContexts: &sync.Map{}, + scaleExecutor: executor.NewScaleExecutor(client, scaleClient, reconcilerScheme), + } +} + +func (h *scaleHandler) GetScalers(scalableObject interface{}) ([]scalers.Scaler, error) { + withTriggers, err := asDuckWithTriggers(scalableObject) + if err != nil { + return nil, err + } + + podTemplateSpec, containerName, err := h.getPods(scalableObject) + if err != nil { + return nil, err + } + + return h.buildScalers(withTriggers, podTemplateSpec, containerName) +} + +func (h *scaleHandler) HandleScalableObject(scalableObject interface{}) error { + withTriggers, err := asDuckWithTriggers(scalableObject) + if err != nil { + h.logger.Error(err, "error duck typing object into withTrigger") + return err + } + + key := generateKey(withTriggers) + + ctx, cancel := context.WithCancel(context.TODO()) + + // cancel the outdated ScaleLoop for the same ScaledObject (if exists) + value, loaded := h.scaleLoopContexts.LoadOrStore(key, cancel) + if loaded { + cancelValue, ok := value.(context.CancelFunc) + if ok { + cancelValue() + } + h.scaleLoopContexts.Store(key, cancel) + } + + // a mutex is used to synchronize scale requests per scalableObject + 
scalingMutex := &sync.Mutex{} + go h.startPushScalers(ctx, withTriggers, scalableObject, scalingMutex) + go h.startScaleLoop(ctx, withTriggers, scalableObject, scalingMutex) + return nil +} + +func (h *scaleHandler) DeleteScalableObject(scalableObject interface{}) error { + withTriggers, err := asDuckWithTriggers(scalableObject) + if err != nil { + h.logger.Error(err, "error duck typing object into withTrigger") + return err + } + + key := generateKey(withTriggers) + + result, ok := h.scaleLoopContexts.Load(key) + if ok { + cancel, ok := result.(context.CancelFunc) + if ok { + cancel() + } + h.scaleLoopContexts.Delete(key) + } else { + h.logger.V(1).Info("ScaleObject was not found in controller cache", "key", key) + } + + return nil +} + +// startScaleLoop blocks forever and checks the scaledObject based on its pollingInterval +func (h *scaleHandler) startScaleLoop(ctx context.Context, withTriggers *kedav1alpha1.WithTriggers, scalableObject interface{}, scalingMutex *sync.Mutex) { + logger := h.logger.WithValues("type", withTriggers.Kind, "namespace", withTriggers.Namespace, "name", withTriggers.Name) + + // kick off one check to the scalers now + h.checkScalers(ctx, scalableObject, scalingMutex) + + pollingInterval := getPollingInterval(withTriggers) + logger.V(1).Info("Watching with pollingInterval", "PollingInterval", pollingInterval) + + for { + select { + case <-time.After(pollingInterval): + h.checkScalers(ctx, scalableObject, scalingMutex) + case <-ctx.Done(): + logger.V(1).Info("Context canceled") + return + } + } +} + +func (h *scaleHandler) startPushScalers(ctx context.Context, withTriggers *kedav1alpha1.WithTriggers, scalableObject interface{}, scalingMutex *sync.Mutex) { + logger := h.logger.WithValues("type", withTriggers.Kind, "namespace", withTriggers.Namespace, "name", withTriggers.Name) + ss, err := h.GetScalers(scalableObject) + if err != nil { + logger.Error(err, "Error getting scalers", "object", scalableObject) + return + } + + for _, s := 
range ss { + scaler, ok := s.(scalers.PushScaler) + if !ok { + continue + } + + go func() { + activeCh := make(chan bool) + go scaler.Run(ctx, activeCh) + for { + select { + case <-ctx.Done(): + return + case active := <-activeCh: + scalingMutex.Lock() + switch obj := scalableObject.(type) { + case *kedav1alpha1.ScaledObject: + h.scaleExecutor.RequestScale(ctx, obj, active) + case *kedav1alpha1.ScaledJob: + // TODO: revisit when implementing ScaledJob + h.scaleExecutor.RequestJobScale(ctx, obj, active, 1, 1) + } + scalingMutex.Unlock() + } + } + }() + } +} + +// checkScalers contains the main logic for the ScaleHandler scaling logic. +// It'll check each trigger active status then call RequestScale +func (h *scaleHandler) checkScalers(ctx context.Context, scalableObject interface{}, scalingMutex *sync.Mutex) { + scalers, err := h.GetScalers(scalableObject) + if err != nil { + h.logger.Error(err, "Error getting scalers", "object", scalableObject) + return + } + + scalingMutex.Lock() + defer scalingMutex.Unlock() + switch obj := scalableObject.(type) { + case *kedav1alpha1.ScaledObject: + h.scaleExecutor.RequestScale(ctx, obj, h.checkScaledObjectScalers(ctx, scalers)) + case *kedav1alpha1.ScaledJob: + isActive, scaleTo, maxScale := h.checkScaledJobScalers(ctx, scalers) + h.scaleExecutor.RequestJobScale(ctx, obj, isActive, scaleTo, maxScale) + } +} + +func (h *scaleHandler) checkScaledObjectScalers(ctx context.Context, scalers []scalers.Scaler) bool { + isActive := false + for _, scaler := range scalers { + isTriggerActive, err := scaler.IsActive(ctx) + scaler.Close() + + if err != nil { + h.logger.V(1).Info("Error getting scale decision", "Error", err) + continue + } else if isTriggerActive { + isActive = true + h.logger.V(1).Info("Scaler for scaledObject is active", "Scaler", scaler) + } + } + return isActive +} + +func (h *scaleHandler) checkScaledJobScalers(ctx context.Context, scalers []scalers.Scaler) (bool, int64, int64) { + var queueLength int64 + var maxValue 
int64 + isActive := false + + for _, scaler := range scalers { + scalerLogger := h.logger.WithValues("Scaler", scaler) + + isTriggerActive, err := scaler.IsActive(ctx) + scaler.Close() + scalerLogger.Info("Active trigger", "isTriggerActive", isTriggerActive) + metricSpecs := scaler.GetMetricSpecForScaling() + + var metricValue int64 + for _, metric := range metricSpecs { + metricValue, _ = metric.External.Target.AverageValue.AsInt64() + maxValue += metricValue + } + scalerLogger.Info("Scaler max value", "MaxValue", maxValue) + + metrics, _ := scaler.GetMetrics(ctx, "queueLength", nil) + + for _, m := range metrics { + if m.MetricName == "queueLength" { + metricValue, _ = m.Value.AsInt64() + queueLength += metricValue + } + } + scalerLogger.Info("QueueLength Metric value", "queueLength", queueLength) + + if err != nil { + scalerLogger.V(1).Info("Error getting scale decision, but continue", "Error", err) + continue + } else if isTriggerActive { + isActive = true + scalerLogger.Info("Scaler is active") + } + } + + return isActive, queueLength, maxValue +} + +// buildScalers returns list of Scalers for the specified triggers +func (h *scaleHandler) buildScalers(withTriggers *kedav1alpha1.WithTriggers, podTemplateSpec *corev1.PodTemplateSpec, containerName string) ([]scalers.Scaler, error) { + logger := h.logger.WithValues("type", withTriggers.Kind, "namespace", withTriggers.Namespace, "name", withTriggers.Name) + var scalersRes []scalers.Scaler + + resolvedEnv, err := resolver.ResolveContainerEnv(h.client, logger, &podTemplateSpec.Spec, containerName, withTriggers.Namespace) + if err != nil { + return scalersRes, fmt.Errorf("error resolving secrets for ScaleTarget: %s", err) + } + + for i, trigger := range withTriggers.Spec.Triggers { + authParams, podIdentity := resolver.ResolveAuthRef(h.client, logger, trigger.AuthenticationRef, &podTemplateSpec.Spec, withTriggers.Namespace) + + if podIdentity == kedav1alpha1.PodIdentityProviderAwsEKS { + serviceAccountName := 
podTemplateSpec.Spec.ServiceAccountName + serviceAccount := &corev1.ServiceAccount{} + err = h.client.Get(context.TODO(), types.NamespacedName{Name: serviceAccountName, Namespace: withTriggers.Namespace}, serviceAccount) + if err != nil { + closeScalers(scalersRes) + return []scalers.Scaler{}, fmt.Errorf("error getting service account: %s", err) + } + authParams["awsRoleArn"] = serviceAccount.Annotations[kedav1alpha1.PodIdentityAnnotationEKS] + } else if podIdentity == kedav1alpha1.PodIdentityProviderAwsKiam { + authParams["awsRoleArn"] = podTemplateSpec.ObjectMeta.Annotations[kedav1alpha1.PodIdentityAnnotationKiam] + } + + scaler, err := buildScaler(withTriggers.Name, withTriggers.Namespace, trigger.Type, resolvedEnv, trigger.Metadata, authParams, podIdentity) + if err != nil { + closeScalers(scalersRes) + return []scalers.Scaler{}, fmt.Errorf("error getting scaler for trigger #%d: %s", i, err) + } + + scalersRes = append(scalersRes, scaler) + } + + return scalersRes, nil +} + +func (h *scaleHandler) getPods(scalableObject interface{}) (*corev1.PodTemplateSpec, string, error) { + switch obj := scalableObject.(type) { + case *kedav1alpha1.ScaledObject: + unstruct := &unstructured.Unstructured{} + unstruct.SetGroupVersionKind(obj.Status.ScaleTargetGVKR.GroupVersionKind()) + if err := h.client.Get(context.TODO(), client.ObjectKey{Namespace: obj.Namespace, Name: obj.Spec.ScaleTargetRef.Name}, unstruct); err != nil { + // resource doesn't exist + h.logger.Error(err, "Target resource doesn't exist", "resource", obj.Status.ScaleTargetGVKR.GVKString(), "name", obj.Spec.ScaleTargetRef.Name) + return nil, "", err + } + + withPods := &kedav1alpha1.WithPod{} + if err := duck.FromUnstructured(unstruct, withPods); err != nil { + h.logger.Error(err, "Cannot convert unstructured into PodSpecable Duck-type", "object", unstruct) + } + + if withPods.Spec.Template.Spec.Containers == nil { + h.logger.Info("There aren't any containers in the ScaleTarget", "resource", 
obj.Status.ScaleTargetGVKR.GVKString(), "name", obj.Spec.ScaleTargetRef.Name) + return nil, "", fmt.Errorf("no containers found") + } + + podTemplateSpec := corev1.PodTemplateSpec{ + ObjectMeta: withPods.ObjectMeta, + Spec: withPods.Spec.Template.Spec, + } + return &podTemplateSpec, obj.Spec.ScaleTargetRef.ContainerName, nil + case *kedav1alpha1.ScaledJob: + // TODO add ContainerName for ScaledJobs!! + return &obj.Spec.JobTargetRef.Template, "", nil + default: + return nil, "", fmt.Errorf("unknown scalable object type %v", scalableObject) + } +} + +func buildScaler(name, namespace, triggerType string, resolvedEnv, triggerMetadata, authParams map[string]string, podIdentity string) (scalers.Scaler, error) { + switch triggerType { + case "azure-queue": + return scalers.NewAzureQueueScaler(resolvedEnv, triggerMetadata, authParams, podIdentity) + case "azure-servicebus": + return scalers.NewAzureServiceBusScaler(resolvedEnv, triggerMetadata, authParams, podIdentity) + case "aws-sqs-queue": + return scalers.NewAwsSqsQueueScaler(resolvedEnv, triggerMetadata, authParams) + case "aws-cloudwatch": + return scalers.NewAwsCloudwatchScaler(resolvedEnv, triggerMetadata, authParams) + case "aws-kinesis-stream": + return scalers.NewAwsKinesisStreamScaler(resolvedEnv, triggerMetadata, authParams) + case "kafka": + return scalers.NewKafkaScaler(resolvedEnv, triggerMetadata, authParams) + case "rabbitmq": + return scalers.NewRabbitMQScaler(resolvedEnv, triggerMetadata, authParams) + case "azure-eventhub": + return scalers.NewAzureEventHubScaler(resolvedEnv, triggerMetadata) + case "prometheus": + return scalers.NewPrometheusScaler(resolvedEnv, triggerMetadata) + case "redis": + return scalers.NewRedisScaler(resolvedEnv, triggerMetadata, authParams) + case "gcp-pubsub": + return scalers.NewPubSubScaler(resolvedEnv, triggerMetadata) + case "external": + return scalers.NewExternalScaler(name, namespace, triggerMetadata) + case "external-push": + return 
scalers.NewExternalPushScaler(name, namespace, triggerMetadata) + case "liiklus": + return scalers.NewLiiklusScaler(resolvedEnv, triggerMetadata) + case "stan": + return scalers.NewStanScaler(resolvedEnv, triggerMetadata) + case "huawei-cloudeye": + return scalers.NewHuaweiCloudeyeScaler(triggerMetadata, authParams) + case "azure-blob": + return scalers.NewAzureBlobScaler(resolvedEnv, triggerMetadata, authParams, podIdentity) + case "postgresql": + return scalers.NewPostgreSQLScaler(resolvedEnv, triggerMetadata, authParams) + case "mysql": + return scalers.NewMySQLScaler(resolvedEnv, triggerMetadata, authParams) + case "azure-monitor": + return scalers.NewAzureMonitorScaler(resolvedEnv, triggerMetadata, authParams) + default: + return nil, fmt.Errorf("no scaler found for type: %s", triggerType) + } +} + +func asDuckWithTriggers(scalableObject interface{}) (*kedav1alpha1.WithTriggers, error) { + withTriggers := &kedav1alpha1.WithTriggers{} + switch obj := scalableObject.(type) { + case *kedav1alpha1.ScaledObject: + withTriggers = &kedav1alpha1.WithTriggers{ + TypeMeta: obj.TypeMeta, + ObjectMeta: obj.ObjectMeta, + Spec: kedav1alpha1.WithTriggersSpec{ + PollingInterval: obj.Spec.PollingInterval, + Triggers: obj.Spec.Triggers, + }, + } + case *kedav1alpha1.ScaledJob: + withTriggers = &kedav1alpha1.WithTriggers{ + TypeMeta: obj.TypeMeta, + ObjectMeta: obj.ObjectMeta, + Spec: kedav1alpha1.WithTriggersSpec{ + PollingInterval: obj.Spec.PollingInterval, + Triggers: obj.Spec.Triggers, + }, + } + default: + // here could be the conversion from unknown Duck type potentially in the future + return nil, fmt.Errorf("unknown scalable object type %v", scalableObject) + } + return withTriggers, nil +} + +func closeScalers(scalers []scalers.Scaler) { + for _, scaler := range scalers { + defer scaler.Close() + } +} + +func getPollingInterval(withTriggers *kedav1alpha1.WithTriggers) time.Duration { + if withTriggers.Spec.PollingInterval != nil { + return time.Second * 
time.Duration(*withTriggers.Spec.PollingInterval) + } + + return time.Second * time.Duration(defaultPollingInterval) +} + +func generateKey(scalableObject *kedav1alpha1.WithTriggers) string { + return fmt.Sprintf("%s.%s.%s", scalableObject.Kind, scalableObject.Namespace, scalableObject.Name) +} diff --git a/pkg/util/gvkr.go b/pkg/util/gvkr.go new file mode 100644 index 00000000000..b0bc6ce27e5 --- /dev/null +++ b/pkg/util/gvkr.go @@ -0,0 +1,68 @@ +package util + +import ( + kedav1alpha1 "github.com/kedacore/keda/pkg/apis/keda/v1alpha1" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + defaultVersion = "v1" + defaultGroup = "apps" + defaultKind = "Deployment" + defaultResource = "deployments" +) + +// ParseGVKR returns GroupVersionKindResource for specified apiVersion (groupVersion) and Kind +func ParseGVKR(restMapper meta.RESTMapper, apiVersion string, kind string) (kedav1alpha1.GroupVersionKindResource, error) { + var group, version, resource string + + // if apiVersion is not specified, we suppose the default one should be used + if apiVersion == "" { + group = defaultGroup + version = defaultVersion + } else { + groupVersion, err := schema.ParseGroupVersion(apiVersion) + if err != nil { + return kedav1alpha1.GroupVersionKindResource{}, err + } + + group = groupVersion.Group + version = groupVersion.Version + } + + // if kind is not specified, we suppose that default one should be used + if kind == "" { + kind = defaultKind + } + + // get resource + resource, err := getResource(restMapper, group, version, kind) + if err != nil { + return kedav1alpha1.GroupVersionKindResource{}, err + } + + return kedav1alpha1.GroupVersionKindResource{ + Group: group, + Version: version, + Kind: kind, + Resource: resource, + }, nil +} + +func getResource(restMapper meta.RESTMapper, group string, version string, kind string) (string, error) { + switch kind { + case defaultKind: + return defaultResource, nil + case "StatefulSet": + 
return "statefulsets", nil + default: + restmapping, err := restMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, version) + if err == nil { + return restmapping.Resource.GroupResource().Resource, nil + } else { + return "", err + } + } +} diff --git a/pkg/util/k8sversion.go b/pkg/util/k8sversion.go new file mode 100644 index 00000000000..46dc90da1c2 --- /dev/null +++ b/pkg/util/k8sversion.go @@ -0,0 +1,35 @@ +package util + +import ( + "strconv" + + "k8s.io/apimachinery/pkg/version" +) + +type K8sVersion struct { + Version *version.Info + MinorVersion int + PrettyVersion string + Parsed bool +} + +func NewK8sVersion(version *version.Info) K8sVersion { + minorTrimmed := "" + if len(version.Minor) > 2 { + minorTrimmed = version.Minor[:2] + } + + parsed := false + minor, err := strconv.Atoi(minorTrimmed) + if err == nil { + parsed = true + } + + k8sVersion := new(K8sVersion) + k8sVersion.Parsed = parsed + k8sVersion.Version = version + k8sVersion.MinorVersion = minor + k8sVersion.PrettyVersion = version.Major + "." + version.Minor + + return *k8sVersion +} diff --git a/tests/scalers/artemis.test.ts b/tests/scalers/artemis.test.ts index 0bc781a8da5..f9837303c45 100644 --- a/tests/scalers/artemis.test.ts +++ b/tests/scalers/artemis.test.ts @@ -76,7 +76,7 @@ test.after.always.cb('clean up artemis deployment', t => { const scaledObjectYaml=` -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: TriggerAuthentication metadata: name: trigger-auth-kedartemis @@ -89,15 +89,13 @@ spec: name: kedartemis key: artemis-password --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: kedartemis-consumer-scaled-object - labels: - deploymentName: kedartemis-consumer spec: scaleTargetRef: - deploymentName: kedartemis-consumer + name: kedartemis-consumer pollingInterval: 3 # Optional. Default: 30 seconds cooldownPeriod: 10 # Optional. Default: 300 seconds minReplicaCount: 0 # Optional. 
Default: 0 diff --git a/tests/scalers/azure-blob.test.ts b/tests/scalers/azure-blob.test.ts index f55350f70b3..ad31bda36af 100644 --- a/tests/scalers/azure-blob.test.ts +++ b/tests/scalers/azure-blob.test.ts @@ -64,7 +64,7 @@ test.after.always('clean up azure-blob deployment', t => { const resources = [ 'secret/test-secrets', 'deployment.apps/test-deployment', - 'scaledobject.keda.k8s.io/test-scaledobject' + 'scaledobject.keda.sh/test-scaledobject' ] for (const resource of resources) { @@ -124,15 +124,13 @@ spec: nodeSelector: beta.kubernetes.io/os: linux --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: test-scaledobject - labels: - deploymentName: test-deployment spec: scaleTargetRef: - deploymentName: test-deployment + name: test-deployment pollingInterval: 20 maxReplicaCount: 2 cooldownPeriod: 10 diff --git a/tests/scalers/azure-queue-trigger-auth.test.ts b/tests/scalers/azure-queue-trigger-auth.test.ts index 5c982d6a3f9..36640771b0c 100644 --- a/tests/scalers/azure-queue-trigger-auth.test.ts +++ b/tests/scalers/azure-queue-trigger-auth.test.ts @@ -80,7 +80,7 @@ test.after.always.cb('clean up azure-queue deployment', t => { const resources = [ 'secret/test-secrets', 'deployment.apps/test-deployment', - 'scaledobject.keda.k8s.io/test-scaledobject', + 'scaledobject.keda.sh/test-scaledobject', ] for (const resource of resources) { @@ -131,15 +131,13 @@ spec: - name: FUNCTIONS_WORKER_RUNTIME value: node --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: test-scaledobject - labels: - deploymentName: test-deployment spec: scaleTargetRef: - deploymentName: test-deployment + name: test-deployment pollingInterval: 5 maxReplicaCount: 1 cooldownPeriod: 10 @@ -150,7 +148,7 @@ spec: metadata: queueName: ${queueName} --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: TriggerAuthentication metadata: name: azure-queue-auth diff --git 
a/tests/scalers/azure-queue.test.ts b/tests/scalers/azure-queue.test.ts index 8f88dfaa703..175f68ac75c 100644 --- a/tests/scalers/azure-queue.test.ts +++ b/tests/scalers/azure-queue.test.ts @@ -78,7 +78,7 @@ test.after.always.cb('clean up azure-queue deployment', t => { const resources = [ 'secret/test-secrets', 'deployment.apps/test-deployment', - 'scaledobject.keda.k8s.io/test-scaledobject', + 'scaledobject.keda.sh/test-scaledobject', ] for (const resource of resources) { @@ -134,15 +134,13 @@ spec: name: test-secrets key: AzureWebJobsStorage --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: test-scaledobject - labels: - deploymentName: test-deployment spec: scaleTargetRef: - deploymentName: test-deployment + name: test-deployment pollingInterval: 5 maxReplicaCount: 4 cooldownPeriod: 10 diff --git a/tests/scalers/kafka.test.ts b/tests/scalers/kafka.test.ts index 7f08b75f0d4..d6885774715 100644 --- a/tests/scalers/kafka.test.ts +++ b/tests/scalers/kafka.test.ts @@ -230,16 +230,14 @@ spec: - secretRef: name: twitter-function --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: twitter-function namespace: ${defaultNamespace} - labels: - deploymentName: twitter-function spec: scaleTargetRef: - deploymentName: twitter-function + name: twitter-function triggers: - type: kafka metadata: diff --git a/tests/scalers/prometheus.test.ts b/tests/scalers/prometheus.test.ts index 89fa55aaa82..151cd4e801b 100644 --- a/tests/scalers/prometheus.test.ts +++ b/tests/scalers/prometheus.test.ts @@ -102,7 +102,7 @@ test.after.always.cb('clean up prometheus deployment', t => { 'deployment.apps/test-app', 'deployment.apps/keda-test-app', 'service/test-app', - 'scaledobject.keda.k8s.io/prometheus-scaledobject', + 'scaledobject.keda.sh/prometheus-scaledobject', 'job/generate-requests', ] @@ -179,13 +179,13 @@ spec: selector: type: keda-testing --- -apiVersion: keda.k8s.io/v1alpha1 
+apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: prometheus-scaledobject spec: scaleTargetRef: - deploymentName: keda-test-app + name: keda-test-app minReplicaCount: 0 maxReplicaCount: 5 pollingInterval: 5 diff --git a/tests/scalers/rabbitmq-queue-amqp.test.ts b/tests/scalers/rabbitmq-queue-amqp.test.ts index 96e9e685336..f7531c2c500 100644 --- a/tests/scalers/rabbitmq-queue-amqp.test.ts +++ b/tests/scalers/rabbitmq-queue-amqp.test.ts @@ -62,7 +62,7 @@ test.after.always.cb('clean up rabbitmq-queue deployment', t => { const resources = [ 'secret/test-secrets', 'deployment.apps/test-deployment', - 'scaledobject.keda.k8s.io/test-scaledobject', + 'scaledobject.keda.sh/test-scaledobject', ] for (const resource of resources) { @@ -109,15 +109,13 @@ spec: - secretRef: name: test-secrets --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: test-scaledobject - labels: - deploymentName: test-deployment spec: scaleTargetRef: - deploymentName: test-deployment + name: test-deployment pollingInterval: 5 cooldownPeriod: 10 minReplicaCount: 0 diff --git a/tests/scalers/rabbitmq-queue-http.test.ts b/tests/scalers/rabbitmq-queue-http.test.ts index 418a75fe2f0..d6edfab26ef 100644 --- a/tests/scalers/rabbitmq-queue-http.test.ts +++ b/tests/scalers/rabbitmq-queue-http.test.ts @@ -64,7 +64,7 @@ test.after.always.cb('clean up rabbitmq-queue deployment', t => { const resources = [ 'secret/test-secrets-api', 'deployment.apps/test-deployment', - 'scaledobject.keda.k8s.io/test-scaledobject', + 'scaledobject.keda.sh/test-scaledobject', ] for (const resource of resources) { @@ -111,15 +111,13 @@ spec: - secretRef: name: test-secrets-api --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: test-scaledobject - labels: - deploymentName: test-deployment spec: scaleTargetRef: - deploymentName: test-deployment + name: test-deployment pollingInterval: 5 cooldownPeriod: 10 
minReplicaCount: 0 diff --git a/tests/scalers/rabbitmq-queue-trigger-auth.test.ts b/tests/scalers/rabbitmq-queue-trigger-auth.test.ts index caa7011d2a4..963e0ef43e1 100644 --- a/tests/scalers/rabbitmq-queue-trigger-auth.test.ts +++ b/tests/scalers/rabbitmq-queue-trigger-auth.test.ts @@ -63,8 +63,8 @@ test.after.always.cb('clean up rabbitmq-queue deployment', t => { const resources = [ 'secret/test-secrets', 'deployment.apps/test-deployment', - 'scaledobject.keda.k8s.io/test-scaledobject', - 'triggerauthentications.keda.k8s.io/rabbitmq-trigger-auth', + 'scaledobject.keda.sh/test-scaledobject', + 'triggerauthentications.keda.sh/rabbitmq-trigger-auth', ] for (const resource of resources) { @@ -109,7 +109,7 @@ spec: args: - '{{CONNECTION_STRING}}' --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: TriggerAuthentication metadata: name: rabbitmq-trigger-auth @@ -119,15 +119,13 @@ spec: name: rabbitmq-test-secret key: RabbitMqHost --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: test-scaledobject - labels: - deploymentName: test-deployment spec: scaleTargetRef: - deploymentName: test-deployment + name: test-deployment pollingInterval: 5 cooldownPeriod: 10 minReplicaCount: 0 diff --git a/tests/scalers/redis-streams.test.ts b/tests/scalers/redis-streams.test.ts index 1f425c0a4a8..e5a669978a3 100644 --- a/tests/scalers/redis-streams.test.ts +++ b/tests/scalers/redis-streams.test.ts @@ -103,8 +103,8 @@ test.after.always.cb('clean up deployment', t => { const resources = [ 'secret/redis-password', 'deployment/redis-streams-consumer', - 'scaledobject.keda.k8s.io/redis-streams-scaledobject', - 'triggerauthentications.keda.k8s.io/keda-redis-stream-triggerauth' + 'scaledobject.keda.sh/redis-streams-scaledobject', + 'triggerauthentications.keda.sh/keda-redis-stream-triggerauth' ] for (const resource of resources) { @@ -159,7 +159,7 @@ type: Opaque data: password: {{REDIS_PASSWORD}} --- -apiVersion: 
keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: TriggerAuthentication metadata: name: keda-redis-stream-triggerauth @@ -200,15 +200,13 @@ spec: name: redis-password key: password --- -apiVersion: keda.k8s.io/v1alpha1 +apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: redis-streams-scaledobject - labels: - deploymentName: redis-streams-consumer spec: scaleTargetRef: - deploymentName: redis-streams-consumer + name: redis-streams-consumer pollingInterval: 5 cooldownPeriod: 10 minReplicaCount: 1 diff --git a/tests/setup.test.ts b/tests/setup.test.ts index 2e3a9d6f01e..d7765ab9048 100644 --- a/tests/setup.test.ts +++ b/tests/setup.test.ts @@ -43,14 +43,17 @@ test.serial('Deploy Keda', t => { } } - if (sh.exec('kubectl apply -f ../deploy/crds/keda.k8s.io_scaledobjects_crd.yaml').code !== 0) { - t.fail('error deploying keda. ' + result) + if (sh.exec('kubectl apply -f ../deploy/crds/keda.sh_scaledobjects_crd.yaml').code !== 0) { + t.fail('error deploying ScaledObject CRD. ' + result) + } + if (sh.exec('kubectl apply -f ../deploy/crds/keda.sh_scaledjobs_crd.yaml --validate=false').code !== 0) { + t.fail('error deploying ScaledJob CRD. ' + result) } if ( - sh.exec('kubectl apply -f ../deploy/crds/keda.k8s.io_triggerauthentications_crd.yaml').code !== + sh.exec('kubectl apply -f ../deploy/crds/keda.sh_triggerauthentications_crd.yaml').code !== 0 ) { - t.fail('error deploying keda. ' + result) + t.fail('error deploying TriggerAuthentication CRD. ' + result) } if (sh.exec('kubectl apply -f ../deploy/').code !== 0) { t.fail('error deploying keda. 
' + result) diff --git a/tools/build-tools.Dockerfile b/tools/build-tools.Dockerfile index 1334b9d0ed3..0cf2426f1f9 100644 --- a/tools/build-tools.Dockerfile +++ b/tools/build-tools.Dockerfile @@ -6,6 +6,8 @@ RUN apt-get update && \ # Install azure-cli RUN apt-get install apt-transport-https lsb-release software-properties-common dirmngr -y && \ + curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | \ + tee /etc/apt/trusted.gpg.d/microsoft.asc.gpg > /dev/null && \ AZ_REPO=$(lsb_release -cs) && \ echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $AZ_REPO main" | \ tee /etc/apt/sources.list.d/azure-cli.list && \ @@ -16,7 +18,7 @@ RUN apt-get install apt-transport-https lsb-release software-properties-common d apt-get install -y azure-cli # Install docker client -RUN wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.2.tgz && \ +RUN curl -LO https://download.docker.com/linux/static/stable/x86_64/docker-19.03.2.tgz && \ docker_sha256=865038730c79ab48dfed1365ee7627606405c037f46c9ae17c5ec1f487da1375 && \ echo "$docker_sha256 docker-19.03.2.tgz" | sha256sum -c - && \ tar xvzf docker-19.03.2.tgz && \ @@ -25,7 +27,7 @@ RUN wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.2.t # Install golang RUN GO_VERSION=1.13.3 && \ - wget https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz && \ + curl -LO https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz && \ go_sha256=0804bf02020dceaa8a7d7275ee79f7a142f1996bfd0c39216ccb405f93f994c0 && \ echo "$go_sha256 go${GO_VERSION}.linux-amd64.tar.gz" | sha256sum -c - && \ tar -C /usr/local -xvzf go${GO_VERSION}.linux-amd64.tar.gz && \ @@ -33,7 +35,7 @@ RUN GO_VERSION=1.13.3 && \ # Install helm/tiller RUN HELM_VERSION=v2.16.1 && \ - wget https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz && \ + curl -LO https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz && \ 
helm_sha256=7eebaaa2da4734242bbcdced62cc32ba8c7164a18792c8acdf16c77abffce202 && \ echo "$helm_sha256 helm-${HELM_VERSION}-linux-amd64.tar.gz" | sha256sum -c - && \ tar xzvf helm-${HELM_VERSION}-linux-amd64.tar.gz && \ @@ -53,10 +55,10 @@ RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - && \ apt-get install -y nodejs # Install operator-sdk -RUN RELEASE_VERSION=v0.12.0 && \ +RUN RELEASE_VERSION=v0.18.1 && \ curl -LO https://github.com/operator-framework/operator-sdk/releases/download/${RELEASE_VERSION}/operator-sdk-${RELEASE_VERSION}-x86_64-linux-gnu && \ curl -LO https://github.com/operator-framework/operator-sdk/releases/download/${RELEASE_VERSION}/operator-sdk-${RELEASE_VERSION}-x86_64-linux-gnu.asc && \ - gpg --keyserver keyserver.ubuntu.com --recv-key 0CF50BEE7E4DF6445E08C0EA9AFDE59E90D2B445 && \ + gpg --keyserver keyserver.ubuntu.com --recv-key 8018D6F1B58E194625E38581D16086E39AF46519 && \ gpg --verify operator-sdk-${RELEASE_VERSION}-x86_64-linux-gnu.asc && \ chmod +x operator-sdk-${RELEASE_VERSION}-x86_64-linux-gnu && \ mkdir -p /usr/local/bin/ && \ diff --git a/version/version.go b/version/version.go index 9a2b77ccd25..c6af2c00294 100644 --- a/version/version.go +++ b/version/version.go @@ -1,6 +1,6 @@ package version var ( - Version = "1.5.0" + Version = "2.0.0-alpha1" GitCommit string )