From c3b38d86d81667629845931374230378e53c898f Mon Sep 17 00:00:00 2001 From: Anton Kirillov Date: Mon, 6 Jul 2020 11:13:57 -0700 Subject: [PATCH] Adding retries for gRPC calls (#1248) --- Gopkg.lock | 30 +- Gopkg.toml | 4 + pkg/controller.v1alpha3/consts/const.go | 10 +- .../suggestionclient/suggestionclient.go | 14 +- pkg/controller.v1beta1/consts/const.go | 10 +- .../suggestionclient/suggestionclient.go | 12 +- .../grpc-ecosystem/go-grpc-middleware/LICENSE | 201 ++++++ .../go-grpc-middleware/retry/backoff.go | 44 ++ .../go-grpc-middleware/retry/doc.go | 25 + .../go-grpc-middleware/retry/options.go | 140 ++++ .../go-grpc-middleware/retry/retry.go | 321 +++++++++ .../util/backoffutils/backoff.go | 28 + .../go-grpc-middleware/util/metautils/doc.go | 19 + .../util/metautils/nicemd.go | 126 ++++ .../util/metautils/single_key.go | 22 + .../client-go/discovery/fake/discovery.go | 131 ++++ vendor/k8s.io/client-go/testing/actions.go | 659 ++++++++++++++++++ vendor/k8s.io/client-go/testing/fake.go | 213 ++++++ vendor/k8s.io/client-go/testing/fixture.go | 530 ++++++++++++++ 19 files changed, 2532 insertions(+), 7 deletions(-) create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go create mode 100644 vendor/k8s.io/client-go/discovery/fake/discovery.go create mode 100644 vendor/k8s.io/client-go/testing/actions.go create mode 100644 vendor/k8s.io/client-go/testing/fake.go create mode 100644 vendor/k8s.io/client-go/testing/fixture.go diff --git a/Gopkg.lock b/Gopkg.lock index c2fe0fa2f10..b2ecede8737 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -21,7 +21,7 @@ version = "v33.2.0" [[projects]] - digest = "1:7dc53e012b9e58f6fc52dce9c201d38dbc3c0e573bad4a56fa0f861823934915" + digest = "0:" name = "github.com/Azure/go-autorest" packages = [ "autorest", @@ -361,6 +361,18 @@ pruneopts = "NUT" revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f" +[[projects]] + digest = "1:565241cb2792bb47f174ca83a7b889a2c085d56837e8e60bfeffe00e1419f76f" + name = "github.com/grpc-ecosystem/go-grpc-middleware" + packages = [ + "retry", + "util/backoffutils", + "util/metautils", + ] + pruneopts = "NUT" + revision = "3c51f7f332123e8be5a157c0802a228ac85bf9db" + version = "v1.2.0" + [[projects]] digest = "1:7ad601623f5af048e72d0c712e2ce6647dbbd14d9e18c31e3464156de8c567ee" name = "github.com/grpc-ecosystem/grpc-gateway" @@ -1165,10 +1177,11 @@ version = "kubernetes-1.12.9" [[projects]] - digest = "1:3eddbb7af5a4d40c8cbd3b43f1b1d3ac884150612e27f1a618118af08141408a" + digest = "1:04624b2578bd24097709f6d78bec8e648260fc29330c8a9e5a4b3eee57bf6296" name = "k8s.io/client-go" packages = [ "discovery", + "discovery/fake", "dynamic", "kubernetes", "kubernetes/scheme", @@ -1212,6 +1225,7 @@ "rest", "rest/watch", "restmapper", + "testing", "third_party/forked/golang/template", "tools/auth", 
"tools/cache", @@ -1291,7 +1305,7 @@ revision = "39cb288412c48cb533ba4be5d6c28620b9a0c1b4" [[projects]] - digest = "1:ea0871075d014a0c553b8430c4b162f8dd6397bf25a4938cadcd56d3e05f7afe" + digest = "0:" name = "k8s.io/kubernetes" packages = [ "pkg/cloudprovider/providers/azure/auth", @@ -1418,6 +1432,7 @@ "github.com/google/go-containerregistry/pkg/name", "github.com/google/go-containerregistry/pkg/v1", "github.com/google/go-containerregistry/pkg/v1/remote", + "github.com/grpc-ecosystem/go-grpc-middleware/retry", "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway", "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger", "github.com/grpc-ecosystem/grpc-gateway/runtime", @@ -1426,6 +1441,7 @@ "github.com/kubeflow/pytorch-operator/pkg/apis/pytorch/v1", "github.com/kubeflow/tf-operator/pkg/apis/common/v1", "github.com/kubeflow/tf-operator/pkg/apis/tensorflow/v1", + "github.com/mattbaird/jsonpatch", "github.com/mitchellh/go-ps", "github.com/onsi/ginkgo", "github.com/onsi/gomega", @@ -1454,14 +1470,22 @@ "k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", "k8s.io/apimachinery/pkg/types", "k8s.io/apimachinery/pkg/util/intstr", "k8s.io/apimachinery/pkg/util/rand", + "k8s.io/apimachinery/pkg/util/runtime", "k8s.io/apimachinery/pkg/util/yaml", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/client-go/discovery", + "k8s.io/client-go/discovery/fake", "k8s.io/client-go/kubernetes/scheme", "k8s.io/client-go/plugin/pkg/client/auth/gcp", "k8s.io/client-go/rest", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/cache", "k8s.io/client-go/tools/record", + "k8s.io/client-go/util/flowcontrol", "k8s.io/code-generator/cmd/deepcopy-gen", "k8s.io/code-generator/cmd/openapi-gen", "k8s.io/klog", diff --git a/Gopkg.toml b/Gopkg.toml index 9978eadbe41..001ce597122 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -115,3 +115,7 @@ required = [ [[prune.project]] name = "k8s.io/gengo" unused-packages = false + +[[constraint]] + name = "github.com/grpc-ecosystem/go-grpc-middleware" + version = "1.2.0" diff --git a/pkg/controller.v1alpha3/consts/const.go b/pkg/controller.v1alpha3/consts/const.go index 9af0e44c57d..ffdd2da15b4 100644 --- a/pkg/controller.v1alpha3/consts/const.go +++ b/pkg/controller.v1alpha3/consts/const.go @@ -1,6 +1,9 @@ package consts -import "github.com/kubeflow/katib/pkg/util/v1alpha3/env" +import ( + "github.com/kubeflow/katib/pkg/util/v1alpha3/env" + "time" +) const ( // ConfigExperimentSuggestionName is the config name of the @@ -35,6 +38,11 @@ const ( // which is used to run healthz check using grpc probe. 
DefaultGRPCService = "manager.v1alpha3.Suggestion" + // DefaultGRPCRetryAttempts is the the maximum number of retries for gRPC calls + DefaultGRPCRetryAttempts = 10 + // DefaultGRPCRetryPeriod is a fixed period of time between gRPC call retries + DefaultGRPCRetryPeriod = 3 * time.Second + // DefaultKatibNamespaceEnvName is the default env name of katib namespace DefaultKatibNamespaceEnvName = "KATIB_CORE_NAMESPACE" // DefaultKatibComposerEnvName is the default env name of katib suggestion composer diff --git a/pkg/controller.v1alpha3/suggestion/suggestionclient/suggestionclient.go b/pkg/controller.v1alpha3/suggestion/suggestionclient/suggestionclient.go index 63e4db6213e..d0c3be37fe2 100644 --- a/pkg/controller.v1alpha3/suggestion/suggestionclient/suggestionclient.go +++ b/pkg/controller.v1alpha3/suggestion/suggestionclient/suggestionclient.go @@ -5,6 +5,9 @@ import ( "fmt" "time" + "github.com/kubeflow/katib/pkg/controller.v1alpha3/consts" + + grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -105,7 +108,15 @@ func (g *General) SyncAssignments( func (g *General) ValidateAlgorithmSettings(instance *suggestionsv1alpha3.Suggestion, e *experimentsv1alpha3.Experiment) error { logger := log.WithValues("Suggestion", types.NamespacedName{Name: instance.GetName(), Namespace: instance.GetNamespace()}) endpoint := util.GetAlgorithmEndpoint(instance) - conn, err := grpc.Dial(endpoint, grpc.WithInsecure()) + + callOpts := []grpc_retry.CallOption{ + grpc_retry.WithBackoff(grpc_retry.BackoffLinear(consts.DefaultGRPCRetryPeriod)), + grpc_retry.WithMax(consts.DefaultGRPCRetryAttempts), + } + conn, err := grpc.Dial(endpoint, grpc.WithInsecure(), + grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(callOpts...)), + grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(callOpts...)), + ) if err != nil { return err } @@ -118,6 +129,7 @@ func (g *General) ValidateAlgorithmSettings(instance *suggestionsv1alpha3.Sugges request := &suggestionapi.ValidateAlgorithmSettingsRequest{ Experiment: g.ConvertExperiment(e), } + // See https://github.com/grpc/grpc-go/issues/2636 // See https://github.com/grpc/grpc-go/pull/2503 _, err = client.ValidateAlgorithmSettings(ctx, request, grpc.WaitForReady(true)) diff --git a/pkg/controller.v1beta1/consts/const.go b/pkg/controller.v1beta1/consts/const.go index b90d7795435..2d9c76c7ab9 100644 --- a/pkg/controller.v1beta1/consts/const.go +++ b/pkg/controller.v1beta1/consts/const.go @@ -1,6 +1,9 @@ package consts -import "github.com/kubeflow/katib/pkg/util/v1beta1/env" +import ( + "github.com/kubeflow/katib/pkg/util/v1beta1/env" + "time" +) const ( // ConfigExperimentSuggestionName is the config name of the @@ -35,6 +38,11 @@ const ( // which is used to run healthz check using grpc probe. 
DefaultGRPCService = "manager.v1beta1.Suggestion" + // DefaultGRPCRetryAttempts is the the maximum number of retries for gRPC calls + DefaultGRPCRetryAttempts = 10 + // DefaultGRPCRetryPeriod is a fixed period of time between gRPC call retries + DefaultGRPCRetryPeriod = 3 * time.Second + // DefaultKatibNamespaceEnvName is the default env name of katib namespace DefaultKatibNamespaceEnvName = "KATIB_CORE_NAMESPACE" // DefaultKatibComposerEnvName is the default env name of katib suggestion composer diff --git a/pkg/controller.v1beta1/suggestion/suggestionclient/suggestionclient.go b/pkg/controller.v1beta1/suggestion/suggestionclient/suggestionclient.go index 3977098f8ec..bb79db8c21f 100644 --- a/pkg/controller.v1beta1/suggestion/suggestionclient/suggestionclient.go +++ b/pkg/controller.v1beta1/suggestion/suggestionclient/suggestionclient.go @@ -5,6 +5,8 @@ import ( "fmt" "time" + grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -110,7 +112,15 @@ func (g *General) SyncAssignments( func (g *General) ValidateAlgorithmSettings(instance *suggestionsv1beta1.Suggestion, e *experimentsv1beta1.Experiment) error { logger := log.WithValues("Suggestion", types.NamespacedName{Name: instance.GetName(), Namespace: instance.GetNamespace()}) endpoint := util.GetAlgorithmEndpoint(instance) - conn, err := grpc.Dial(endpoint, grpc.WithInsecure()) + + callOpts := []grpc_retry.CallOption{ + grpc_retry.WithBackoff(grpc_retry.BackoffLinear(consts.DefaultGRPCRetryPeriod)), + grpc_retry.WithMax(consts.DefaultGRPCRetryAttempts), + } + conn, err := grpc.Dial(endpoint, grpc.WithInsecure(), + grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(callOpts...)), + grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(callOpts...)), + ) if err != nil { return err } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE new file mode 100644 index 00000000000..b2b065037fc --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go new file mode 100644 index 00000000000..ad35f09a87f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go @@ -0,0 +1,44 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" +) + +// BackoffLinear is very simple: it waits for a fixed period of time between calls. +func BackoffLinear(waitBetween time.Duration) BackoffFunc { + return func(attempt uint) time.Duration { + return waitBetween + } +} + +// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). +// +// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. 
+func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc { + return func(attempt uint) time.Duration { + return backoffutils.JitterUp(waitBetween, jitterFraction) + } +} + +// BackoffExponential produces increasing intervals for each attempt. +// +// The scalar is multiplied times 2 raised to the current attempt. So the first +// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s. +func BackoffExponential(scalar time.Duration) BackoffFunc { + return func(attempt uint) time.Duration { + return scalar * time.Duration(backoffutils.ExponentBase2(attempt)) + } +} + +// BackoffExponentialWithJitter creates an exponential backoff like +// BackoffExponential does, but adds jitter. +func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc { + return func(attempt uint) time.Duration { + return backoffutils.JitterUp(scalar*time.Duration(backoffutils.ExponentBase2(attempt)), jitterFraction) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go new file mode 100644 index 00000000000..afd924a1406 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go @@ -0,0 +1,25 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +`grpc_retry` provides client-side request retry logic for gRPC. + +Client-Side Request Retry Interceptor + +It allows for automatic retry, inside the generated gRPC code of requests based on the gRPC status +of the reply. It supports unary (1:1), and server stream (1:n) requests. + +By default the interceptors *are disabled*, preventing accidental use of retries. You can easily +override the number of retries (setting them to more than 0) with a `grpc.ClientOption`, e.g.: + + myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5)) + +Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, use a 50ms +linear backoff with 10% jitter. + +For chained interceptors, the retry interceptor will call every interceptor that follows it +whenever when a retry happens. + +Please see examples for more advanced use. +*/ +package grpc_retry diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go new file mode 100644 index 00000000000..1b3fb1a527d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go @@ -0,0 +1,140 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "context" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // DefaultRetriableCodes is a set of well known types gRPC codes that should be retri-able. + // + // `ResourceExhausted` means that the user quota, e.g. per-RPC limits, have been reached. + // `Unavailable` means that system is currently unavailable and the client should retry again. 
+ DefaultRetriableCodes = []codes.Code{codes.ResourceExhausted, codes.Unavailable} + + defaultOptions = &options{ + max: 0, // disabled + perCallTimeout: 0, // disabled + includeHeader: true, + codes: DefaultRetriableCodes, + backoffFunc: BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { + return BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10)(attempt) + }), + } +) + +// BackoffFunc denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return a time the system client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. +type BackoffFunc func(attempt uint) time.Duration + +// BackoffFuncContext denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return a time the system client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. The context can be used to extract request scoped metadata and context values. +type BackoffFuncContext func(ctx context.Context, attempt uint) time.Duration + +// Disable disables the retry behaviour on this call, or this interceptor. +// +// Its semantically the same to `WithMax` +func Disable() CallOption { + return WithMax(0) +} + +// WithMax sets the maximum number of retries on this call, or this interceptor. +func WithMax(maxRetries uint) CallOption { + return CallOption{applyFunc: func(o *options) { + o.max = maxRetries + }} +} + +// WithBackoff sets the `BackoffFunc` used to control time between retries. +func WithBackoff(bf BackoffFunc) CallOption { + return CallOption{applyFunc: func(o *options) { + o.backoffFunc = BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { + return bf(attempt) + }) + }} +} + +// WithBackoffContext sets the `BackoffFuncContext` used to control time between retries. +func WithBackoffContext(bf BackoffFuncContext) CallOption { + return CallOption{applyFunc: func(o *options) { + o.backoffFunc = bf + }} +} + +// WithCodes sets which codes should be retried. +// +// Please *use with care*, as you may be retrying non-idempotent calls. +// +// You cannot automatically retry on Cancelled and Deadline, please use `WithPerRetryTimeout` for these. +func WithCodes(retryCodes ...codes.Code) CallOption { + return CallOption{applyFunc: func(o *options) { + o.codes = retryCodes + }} +} + +// WithPerRetryTimeout sets the RPC timeout per call (including initial call) on this call, or this interceptor. +// +// The context.Deadline of the call takes precedence and sets the maximum time the whole invocation +// will take, but WithPerRetryTimeout can be used to limit the RPC time per each call. +// +// For example, with context.Deadline = now + 10s, and WithPerRetryTimeout(3 * time.Seconds), each +// of the retry calls (including the initial one) will have a deadline of now + 3s. +// +// A value of 0 disables the timeout overrides completely and returns to each retry call using the +// parent `context.Deadline`. 
+func WithPerRetryTimeout(timeout time.Duration) CallOption { + return CallOption{applyFunc: func(o *options) { + o.perCallTimeout = timeout + }} +} + +type options struct { + max uint + perCallTimeout time.Duration + includeHeader bool + codes []codes.Code + backoffFunc BackoffFuncContext +} + +// CallOption is a grpc.CallOption that is local to grpc_retry. +type CallOption struct { + grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic. + applyFunc func(opt *options) +} + +func reuseOrNewWithCallOptions(opt *options, callOptions []CallOption) *options { + if len(callOptions) == 0 { + return opt + } + optCopy := &options{} + *optCopy = *opt + for _, f := range callOptions { + f.applyFunc(optCopy) + } + return optCopy +} + +func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []CallOption) { + for _, opt := range callOptions { + if co, ok := opt.(CallOption); ok { + retryOptions = append(retryOptions, co) + } else { + grpcOptions = append(grpcOptions, opt) + } + } + return grpcOptions, retryOptions +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go new file mode 100644 index 00000000000..9a6b48d8fa9 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go @@ -0,0 +1,321 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "golang.org/x/net/trace" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + AttemptMetadataKey = "x-retry-attempty" +) + +// UnaryClientInterceptor returns a new retrying unary client interceptor. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return invoker(parentCtx, method, req, reply, cc, grpcOpts...) + } + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { + return err + } + callCtx := perCallContext(parentCtx, callOpts, attempt) + lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...) + // TODO(mwitkow): Maybe dial and transport errors should be retriable? + if lastErr == nil { + return nil + } + logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + if isContextError(lastErr) { + if parentCtx.Err() != nil { + logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) + // its the parent context deadline or cancellation. 
+ return lastErr + } else { + logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) + // its the callCtx deadline or cancellation, in which case try again. + continue + } + } + if !isRetriable(lastErr, callOpts) { + return lastErr + } + } + return lastErr + } +} + +// StreamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +// +// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs +// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, +// BidiStreams), the retry interceptor will fail the call. +func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return streamer(parentCtx, desc, cc, method, grpcOpts...) + } + if desc.ClientStreams { + return nil, status.Errorf(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") + } + + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { + return nil, err + } + callCtx := perCallContext(parentCtx, callOpts, 0) + + var newStreamer grpc.ClientStream + newStreamer, lastErr = streamer(callCtx, desc, cc, method, grpcOpts...) + if lastErr == nil { + retryingStreamer := &serverStreamingRetryingStream{ + ClientStream: newStreamer, + callOpts: callOpts, + parentCtx: parentCtx, + streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, grpcOpts...) + }, + } + return retryingStreamer, nil + } + + logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + if isContextError(lastErr) { + if parentCtx.Err() != nil { + logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) + // its the parent context deadline or cancellation. + return nil, lastErr + } else { + logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) + // its the callCtx deadline or cancellation, in which case try again. + continue + } + } + if !isRetriable(lastErr, callOpts) { + return nil, lastErr + } + } + return nil, lastErr + } +} + +// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a +// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish +// a new ClientStream according to the retry policy. 
+type serverStreamingRetryingStream struct { + grpc.ClientStream + bufferedSends []interface{} // single message that the client can sen + receivedGood bool // indicates whether any prior receives were successful + wasClosedSend bool // indicates that CloseSend was closed + parentCtx context.Context + callOpts *options + streamerCall func(ctx context.Context) (grpc.ClientStream, error) + mu sync.RWMutex +} + +func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) { + s.mu.Lock() + s.ClientStream = clientStream + s.mu.Unlock() +} + +func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ClientStream +} + +func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { + s.mu.Lock() + s.bufferedSends = append(s.bufferedSends, m) + s.mu.Unlock() + return s.getStream().SendMsg(m) +} + +func (s *serverStreamingRetryingStream) CloseSend() error { + s.mu.Lock() + s.wasClosedSend = true + s.mu.Unlock() + return s.getStream().CloseSend() +} + +func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { + return s.getStream().Header() +} + +func (s *serverStreamingRetryingStream) Trailer() metadata.MD { + return s.getStream().Trailer() +} + +func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { + attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) + if !attemptRetry { + return lastErr // success or hard failure + } + // We start off from attempt 1, because zeroth was already made on normal SendMsg(). + for attempt := uint(1); attempt < s.callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil { + return err + } + callCtx := perCallContext(s.parentCtx, s.callOpts, attempt) + newStream, err := s.reestablishStreamAndResendBuffer(callCtx) + if err != nil { + // TODO(mwitkow): Maybe dial and transport errors should be retriable? + return err + } + s.setStream(newStream) + attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) + //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) + if !attemptRetry { + return lastErr + } + } + return lastErr +} + +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { + s.mu.RLock() + wasGood := s.receivedGood + s.mu.RUnlock() + err := s.getStream().RecvMsg(m) + if err == nil || err == io.EOF { + s.mu.Lock() + s.receivedGood = true + s.mu.Unlock() + return false, err + } else if wasGood { + // previous RecvMsg in the stream succeeded, no retry logic should interfere + return false, err + } + if isContextError(err) { + if s.parentCtx.Err() != nil { + logTrace(s.parentCtx, "grpc_retry parent context error: %v", s.parentCtx.Err()) + return false, err + } else { + logTrace(s.parentCtx, "grpc_retry context error from retry call") + // its the callCtx deadline or cancellation, in which case try again. 
+ return true, err + } + } + return isRetriable(err, s.callOpts), err + +} + +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { + s.mu.RLock() + bufferedSends := s.bufferedSends + s.mu.RUnlock() + newStream, err := s.streamerCall(callCtx) + if err != nil { + logTrace(callCtx, "grpc_retry failed redialing new stream: %v", err) + return nil, err + } + for _, msg := range bufferedSends { + if err := newStream.SendMsg(msg); err != nil { + logTrace(callCtx, "grpc_retry failed resending message: %v", err) + return nil, err + } + } + if err := newStream.CloseSend(); err != nil { + logTrace(callCtx, "grpc_retry failed CloseSend on new stream %v", err) + return nil, err + } + return newStream, nil +} + +func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options) error { + var waitTime time.Duration = 0 + if attempt > 0 { + waitTime = callOpts.backoffFunc(parentCtx, attempt) + } + if waitTime > 0 { + logTrace(parentCtx, "grpc_retry attempt: %d, backoff for %v", attempt, waitTime) + timer := time.NewTimer(waitTime) + select { + case <-parentCtx.Done(): + timer.Stop() + return contextErrToGrpcErr(parentCtx.Err()) + case <-timer.C: + } + } + return nil +} + +func isRetriable(err error, callOpts *options) bool { + errCode := status.Code(err) + if isContextError(err) { + // context errors are not retriable based on user settings. + return false + } + for _, code := range callOpts.codes { + if code == errCode { + return true + } + } + return false +} + +func isContextError(err error) bool { + code := status.Code(err) + return code == codes.DeadlineExceeded || code == codes.Canceled +} + +func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context { + ctx := parentCtx + if callOpts.perCallTimeout != 0 { + ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) + } + if attempt > 0 && callOpts.includeHeader { + mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + ctx = mdClone.ToOutgoing(ctx) + } + return ctx +} + +func contextErrToGrpcErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Errorf(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Errorf(codes.Canceled, err.Error()) + default: + return status.Errorf(codes.Unknown, err.Error()) + } +} + +func logTrace(ctx context.Context, format string, a ...interface{}) { + tr, ok := trace.FromContext(ctx) + if !ok { + return + } + tr.LazyPrintf(format, a...) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go new file mode 100644 index 00000000000..4e69a6305aa --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go @@ -0,0 +1,28 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +Backoff Helper Utilities + +Implements common backoff features. +*/ +package backoffutils + +import ( + "math/rand" + "time" +) + +// JitterUp adds random jitter to the duration. +// +// This adds or subtracts time from the duration within a given jitter fraction. 
+// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) +func JitterUp(duration time.Duration, jitter float64) time.Duration { + multiplier := jitter * (rand.Float64()*2 - 1) + return time.Duration(float64(duration) * (1 + multiplier)) +} + +// ExponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0. +func ExponentBase2(a uint) uint { + return (1 << a) >> 1 +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go new file mode 100644 index 00000000000..1ed9bb499b3 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go @@ -0,0 +1,19 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside +Context handlers. + +While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) +they are hard to use. + +The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example +the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context +metadata. + + nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom") + clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx) +*/ + +package metautils diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go new file mode 100644 index 00000000000..9f045674737 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go @@ -0,0 +1,126 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package metautils + +import ( + "context" + "strings" + + "google.golang.org/grpc/metadata" +) + +// NiceMD is a convenience wrapper definiting extra functions on the metadata. +type NiceMD metadata.MD + +// ExtractIncoming extracts an inbound metadata from the server-side context. +// +// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns +// a new empty NiceMD. +func ExtractIncoming(ctx context.Context) NiceMD { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return NiceMD(metadata.Pairs()) + } + return NiceMD(md) +} + +// ExtractOutgoing extracts an outbound metadata from the client-side context. +// +// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns +// a new empty NiceMD. +func ExtractOutgoing(ctx context.Context) NiceMD { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + return NiceMD(metadata.Pairs()) + } + return NiceMD(md) +} + +// Clone performs a *deep* copy of the metadata.MD. +// +// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted +// all keys get copied. 
+func (m NiceMD) Clone(copiedKeys ...string) NiceMD { + newMd := NiceMD(metadata.Pairs()) + for k, vv := range m { + found := false + if len(copiedKeys) == 0 { + found = true + } else { + for _, allowedKey := range copiedKeys { + if strings.EqualFold(allowedKey, k) { + found = true + break + } + } + } + if !found { + continue + } + newMd[k] = make([]string, len(vv)) + copy(newMd[k], vv) + } + return NiceMD(newMd) +} + +// ToOutgoing sets the given NiceMD as a client-side context for dispatching. +func (m NiceMD) ToOutgoing(ctx context.Context) context.Context { + return metadata.NewOutgoingContext(ctx, metadata.MD(m)) +} + +// ToIncoming sets the given NiceMD as a server-side context for dispatching. +// +// This is mostly useful in ServerInterceptors.. +func (m NiceMD) ToIncoming(ctx context.Context) context.Context { + return metadata.NewIncomingContext(ctx, metadata.MD(m)) +} + +// Get retrieves a single value from the metadata. +// +// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set, +// an empty string is returned. +// +// The function is binary-key safe. +func (m NiceMD) Get(key string) string { + k, _ := encodeKeyValue(key, "") + vv, ok := m[k] + if !ok { + return "" + } + return vv[0] +} + +// Del retrieves a single value from the metadata. +// +// It works analogously to http.Header.Del, deleting all values if they exist. +// +// The function is binary-key safe. + +func (m NiceMD) Del(key string) NiceMD { + k, _ := encodeKeyValue(key, "") + delete(m, k) + return m +} + +// Set sets the given value in a metadata. +// +// It works analogously to http.Header.Set, overwriting all previous metadata values. +// +// The function is binary-key safe. +func (m NiceMD) Set(key string, value string) NiceMD { + k, v := encodeKeyValue(key, value) + m[k] = []string{v} + return m +} + +// Add retrieves a single value from the metadata. +// +// It works analogously to http.Header.Add, as it appends to any existing values associated with key. +// +// The function is binary-key safe. +func (m NiceMD) Add(key string, value string) NiceMD { + k, v := encodeKeyValue(key, value) + m[k] = append(m[k], v) + return m +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go new file mode 100644 index 00000000000..8a538716621 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go @@ -0,0 +1,22 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package metautils + +import ( + "encoding/base64" + "strings" +) + +const ( + binHdrSuffix = "-bin" +) + +func encodeKeyValue(k, v string) (string, string) { + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + val := base64.StdEncoding.EncodeToString([]byte(v)) + v = string(val) + } + return k, v +} diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go new file mode 100644 index 00000000000..984a0ba1ec6 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -0,0 +1,131 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + + "github.com/googleapis/gnostic/OpenAPIv2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + kubeversion "k8s.io/client-go/pkg/version" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/testing" +) + +// FakeDiscovery implements discovery.DiscoveryInterface and sometimes calls testing.Fake.Invoke with an action, +// but doesn't respect the return value if any. There is a way to fake static values like ServerVersion by using the Faked... fields on the struct. +type FakeDiscovery struct { + *testing.Fake + FakedServerVersion *version.Info +} + +func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + for _, resourceList := range c.Resources { + if resourceList.GroupVersion == groupVersion { + return resourceList, nil + } + } + return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) +} + +func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + return c.Resources, nil +} + +func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "group"}, + } + c.Invokes(action, nil) + + groups := map[string]*metav1.APIGroup{} + + for _, res := range c.Resources { + gv, err := schema.ParseGroupVersion(res.GroupVersion) + if err != nil { + return nil, err + } + group := groups[gv.Group] + if group == nil { + group = &metav1.APIGroup{ + Name: gv.Group, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }, + } + groups[gv.Group] = group + } + + group.Versions = append(group.Versions, metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }) + } + + list := &metav1.APIGroupList{} + for _, apiGroup := range groups { + list.Groups = append(list.Groups, *apiGroup) + } + + return list, nil + +} + +func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { + action := testing.ActionImpl{} + action.Verb = "get" + action.Resource = schema.GroupVersionResource{Resource: "version"} + c.Invokes(action, nil) + + if c.FakedServerVersion != nil { + return c.FakedServerVersion, nil + } + + versionInfo := kubeversion.Get() + return &versionInfo, nil +} + +func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { + return &openapi_v2.Document{}, nil +} + +func (c *FakeDiscovery) RESTClient() restclient.Interface { + return nil +} diff --git a/vendor/k8s.io/client-go/testing/actions.go 
b/vendor/k8s.io/client-go/testing/actions.go new file mode 100644 index 00000000000..b99f231c8d7 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -0,0 +1,659 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "path" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Name = name + + return action +} + +func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Object = object + + return action +} + +func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) 
CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Object = object + + return action +} + +func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootPatchAction(resource schema.GroupVersionResource, name string, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Name = name + action.Patch = patch + + return action +} + +func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Namespace = namespace + action.Name = name + action.Patch = patch + + return action +} + +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) + action.Name = name + action.Patch = patch + + return action +} + +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) 
+ action.Namespace = namespace + action.Name = name + action.Patch = patch + + return action +} + +func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Object = object + + return action +} +func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Name = name + + return action +} + +func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { + var err error + switch t := opts.(type) { + case metav1.ListOptions: + labelSelector, err = labels.Parse(t.LabelSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err)) + } + fieldSelector, err = fields.ParseSelector(t.FieldSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.FieldSelector, 
err)) + } + resourceVersion = t.ResourceVersion + default: + panic(fmt.Errorf("expect a ListOptions %T", opts)) + } + if labelSelector == nil { + labelSelector = labels.Everything() + } + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + return labelSelector, fieldSelector, resourceVersion +} + +func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func NewProxyGetAction(resource schema.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl { + action := ProxyGetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Scheme = scheme + action.Name = name + action.Port = port + action.Path = path + action.Params = params + return action +} + +type ListRestrictions struct { + Labels labels.Selector + Fields fields.Selector +} +type WatchRestrictions struct { + Labels labels.Selector + Fields fields.Selector + ResourceVersion string +} + +type Action interface { + GetNamespace() string + GetVerb() string + GetResource() schema.GroupVersionResource + GetSubresource() string + Matches(verb, resource string) bool + + // DeepCopy is used to copy an action to avoid any risk of accidental mutation. Most people never need to call this + // because the invocation logic deep copies before calls to storage and reactors. + DeepCopy() Action +} + +type GenericAction interface { + Action + GetValue() interface{} +} + +type GetAction interface { + Action + GetName() string +} + +type ListAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type CreateAction interface { + Action + GetObject() runtime.Object +} + +type UpdateAction interface { + Action + GetObject() runtime.Object +} + +type DeleteAction interface { + Action + GetName() string +} + +type DeleteCollectionAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type PatchAction interface { + Action + GetName() string + GetPatch() []byte +} + +type WatchAction interface { + Action + GetWatchRestrictions() WatchRestrictions +} + +type ProxyGetAction interface { + Action + GetScheme() string + GetName() string + GetPort() string + GetPath() string + GetParams() map[string]string +} + +type ActionImpl struct { + Namespace string + Verb string + Resource schema.GroupVersionResource + Subresource string +} + +func (a ActionImpl) GetNamespace() string { + return a.Namespace +} +func (a ActionImpl) GetVerb() string { + return a.Verb +} +func (a ActionImpl) GetResource() schema.GroupVersionResource { + return a.Resource +} +func (a ActionImpl) GetSubresource() string { + return a.Subresource +} +func (a ActionImpl) Matches(verb, resource string) bool { + return strings.ToLower(verb) == strings.ToLower(a.Verb) && + strings.ToLower(resource) == strings.ToLower(a.Resource.Resource) +} +func (a ActionImpl) DeepCopy() Action { + ret := a + return ret +} + +type GenericActionImpl struct { + ActionImpl + Value interface{} +} + +func (a GenericActionImpl) GetValue() interface{} { + return a.Value +} + +func (a GenericActionImpl) DeepCopy() Action { + return GenericActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + // TODO 
this is wrong, but no worse than before + Value: a.Value, + } +} + +type GetActionImpl struct { + ActionImpl + Name string +} + +func (a GetActionImpl) GetName() string { + return a.Name +} + +func (a GetActionImpl) DeepCopy() Action { + return GetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + +type ListActionImpl struct { + ActionImpl + Kind schema.GroupVersionKind + Name string + ListRestrictions ListRestrictions +} + +func (a ListActionImpl) GetKind() schema.GroupVersionKind { + return a.Kind +} + +func (a ListActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a ListActionImpl) DeepCopy() Action { + return ListActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Kind: a.Kind, + Name: a.Name, + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type CreateActionImpl struct { + ActionImpl + Name string + Object runtime.Object +} + +func (a CreateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a CreateActionImpl) DeepCopy() Action { + return CreateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + } +} + +type UpdateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a UpdateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a UpdateActionImpl) DeepCopy() Action { + return UpdateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + } +} + +type PatchActionImpl struct { + ActionImpl + Name string + Patch []byte +} + +func (a PatchActionImpl) GetName() string { + return a.Name +} + +func (a PatchActionImpl) GetPatch() []byte { + return a.Patch +} + +func (a PatchActionImpl) DeepCopy() Action { + patch := make([]byte, len(a.Patch)) + copy(patch, a.Patch) + return PatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Patch: patch, + } +} + +type DeleteActionImpl struct { + ActionImpl + Name string +} + +func (a DeleteActionImpl) GetName() string { + return a.Name +} + +func (a DeleteActionImpl) DeepCopy() Action { + return DeleteActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + +type DeleteCollectionActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a DeleteCollectionActionImpl) DeepCopy() Action { + return DeleteCollectionActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type WatchActionImpl struct { + ActionImpl + WatchRestrictions WatchRestrictions +} + +func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { + return a.WatchRestrictions +} + +func (a WatchActionImpl) DeepCopy() Action { + return WatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + WatchRestrictions: WatchRestrictions{ + Labels: a.WatchRestrictions.Labels.DeepCopySelector(), + Fields: a.WatchRestrictions.Fields.DeepCopySelector(), + ResourceVersion: a.WatchRestrictions.ResourceVersion, + }, + } +} + +type ProxyGetActionImpl struct { + ActionImpl + Scheme string + Name string + Port string + Path string + Params map[string]string +} 
+ +func (a ProxyGetActionImpl) GetScheme() string { + return a.Scheme +} + +func (a ProxyGetActionImpl) GetName() string { + return a.Name +} + +func (a ProxyGetActionImpl) GetPort() string { + return a.Port +} + +func (a ProxyGetActionImpl) GetPath() string { + return a.Path +} + +func (a ProxyGetActionImpl) GetParams() map[string]string { + return a.Params +} + +func (a ProxyGetActionImpl) DeepCopy() Action { + params := map[string]string{} + for k, v := range a.Params { + params[k] = v + } + return ProxyGetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Scheme: a.Scheme, + Name: a.Name, + Port: a.Port, + Path: a.Path, + Params: params, + } +} diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go new file mode 100644 index 00000000000..8b3f31eaf5c --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fake.go @@ -0,0 +1,213 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// Fake implements client.Interface. Meant to be embedded into a struct to get +// a default implementation. This makes faking out just the method you want to +// test easier. +type Fake struct { + sync.RWMutex + actions []Action // these may be castable to other types, but "Action" is the minimum + + // ReactionChain is the list of reactors that will be attempted for every + // request in the order they are tried. + ReactionChain []Reactor + // WatchReactionChain is the list of watch reactors that will be attempted + // for every request in the order they are tried. + WatchReactionChain []WatchReactor + // ProxyReactionChain is the list of proxy reactors that will be attempted + // for every request in the order they are tried. + ProxyReactionChain []ProxyReactor + + Resources []*metav1.APIResourceList +} + +// Reactor is an interface to allow the composition of reaction functions. +type Reactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles the action and returns results. It may choose to + // delegate by indicated handled=false. + React(action Action) (handled bool, ret runtime.Object, err error) +} + +// WatchReactor is an interface to allow the composition of watch functions. +type WatchReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret watch.Interface, err error) +} + +// ProxyReactor is an interface to allow the composition of proxy get +// functions. +type ProxyReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. 
+ Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret restclient.ResponseWrapper, err error) +} + +// ReactionFunc is a function that returns an object or error for a given +// Action. If "handled" is false, then the test client will ignore the +// results and continue to the next ReactionFunc. A ReactionFunc can describe +// reactions on subresources by testing the result of the action's +// GetSubresource() method. +type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error) + +// WatchReactionFunc is a function that returns a watch interface. If +// "handled" is false, then the test client will ignore the results and +// continue to the next ReactionFunc. +type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error) + +// ProxyReactionFunc is a function that returns a ResponseWrapper interface +// for a given Action. If "handled" is false, then the test client will +// ignore the results and continue to the next ProxyReactionFunc. +type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error) + +// AddReactor appends a reactor to the end of the chain. +func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction}) +} + +// PrependReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...) +} + +// AddWatchReactor appends a reactor to the end of the chain. +func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction}) +} + +// PrependWatchReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...) +} + +// AddProxyReactor appends a reactor to the end of the chain. +func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction}) +} + +// PrependProxyReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...) +} + +// Invokes records the provided Action and then invokes the ReactionFunc that +// handles the action if one exists. defaultReturnObj is expected to be of the +// same type a normal call would return. +func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.ReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action.DeepCopy()) + if !handled { + continue + } + + return ret, err + } + + return defaultReturnObj, nil +} + +// InvokesWatch records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. 
+func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.WatchReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action.DeepCopy()) + if !handled { + continue + } + + return ret, err + } + + return nil, fmt.Errorf("unhandled watch: %#v", action) +} + +// InvokesProxy records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. +func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.ProxyReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action.DeepCopy()) + if !handled || err != nil { + continue + } + + return ret + } + + return nil +} + +// ClearActions clears the history of actions called on the fake client. +func (c *Fake) ClearActions() { + c.Lock() + defer c.Unlock() + + c.actions = make([]Action, 0) +} + +// Actions returns a chronologically ordered slice fake actions called on the +// fake client. +func (c *Fake) Actions() []Action { + c.RLock() + defer c.RUnlock() + fa := make([]Action, len(c.actions)) + copy(fa, c.actions) + return fa +} diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go new file mode 100644 index 00000000000..00c4c49fce4 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -0,0 +1,530 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// ObjectTracker keeps track of objects. It is intended to be used to +// fake calls to a server by returning objects based on their kind, +// namespace and name. +type ObjectTracker interface { + // Add adds an object to the tracker. If object being added + // is a list, its items are added separately. + Add(obj runtime.Object) error + + // Get retrieves the object by its kind, namespace and name. + Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) + + // Create adds an object to the tracker in the specified namespace. + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + + // Update updates an existing object in the tracker in the specified namespace. + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + + // List retrieves all objects of a given kind in the given + // namespace. Only non-List kinds are accepted. 
+ List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + + // Delete deletes an existing object from the tracker. If object + // didn't exist in the tracker prior to deletion, Delete returns + // no error. + Delete(gvr schema.GroupVersionResource, ns, name string) error + + // Watch watches objects from the tracker. Watch returns a channel + // which will push added / modified / deleted object. + Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) +} + +// ObjectScheme abstracts the implementation of common operations on objects. +type ObjectScheme interface { + runtime.ObjectCreater + runtime.ObjectTyper +} + +// ObjectReaction returns a ReactionFunc that applies core.Action to +// the given tracker. +func ObjectReaction(tracker ObjectTracker) ReactionFunc { + return func(action Action) (bool, runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + // Here and below we need to switch on implementation types, + // not on interfaces, as some interfaces are identical + // (e.g. UpdateAction and CreateAction), so if we use them, + // updates and creates end up matching the same case branch. + switch action := action.(type) { + + case ListActionImpl: + obj, err := tracker.List(gvr, action.GetKind(), ns) + return true, obj, err + + case GetActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + return true, obj, err + + case CreateActionImpl: + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return true, nil, err + } + if action.GetSubresource() == "" { + err = tracker.Create(gvr, action.GetObject(), ns) + } else { + // TODO: Currently we're handling subresource creation as an update + // on the enclosing resource. This works for some subresources but + // might not be generic enough. + err = tracker.Update(gvr, action.GetObject(), ns) + } + if err != nil { + return true, nil, err + } + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + return true, obj, err + + case UpdateActionImpl: + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return true, nil, err + } + err = tracker.Update(gvr, action.GetObject(), ns) + if err != nil { + return true, nil, err + } + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + return true, obj, err + + case DeleteActionImpl: + err := tracker.Delete(gvr, ns, action.GetName()) + if err != nil { + return true, nil, err + } + return true, nil, nil + + case PatchActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + // object is not registered + return false, nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return true, nil, err + } + // Only supports strategic merge patch + // TODO: Add support for other Patch types + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + + if err = tracker.Update(gvr, obj, ns); err != nil { + return true, nil, err + } + + return true, obj, nil + + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} + +type tracker struct { + scheme ObjectScheme + decoder runtime.Decoder + lock sync.RWMutex + objects map[schema.GroupVersionResource][]runtime.Object + // The value type of watchers is a map of which the key is either a namespace or + // all/non namespace aka "" and its value is list of fake watchers. 
+ // Manipulations on resources will broadcast the notification events into the + // watchers' channel. Note that too many unhandled events (currently 100, + // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. + watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher +} + +var _ ObjectTracker = &tracker{} + +// NewObjectTracker returns an ObjectTracker that can be used to keep track +// of objects for the fake clientset. Mostly useful for unit tests. +func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { + return &tracker{ + scheme: scheme, + decoder: decoder, + objects: make(map[schema.GroupVersionResource][]runtime.Object), + watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), + } +} + +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { + // Heuristic for list kind: original kind + List suffix. Might + // not always be true but this tracker has a pretty limited + // understanding of the actual API model. + listGVK := gvk + listGVK.Kind = listGVK.Kind + "List" + // GVK does have the concept of "internal version". The scheme recognizes + // the runtime.APIVersionInternal, but not the empty string. + if listGVK.Version == "" { + listGVK.Version = runtime.APIVersionInternal + } + + list, err := t.scheme.New(listGVK) + if err != nil { + return nil, err + } + + if !meta.IsListType(list) { + return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) + } + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return list, nil + } + + matchingObjs, err := filterByNamespaceAndName(objs, ns, "") + if err != nil { + return nil, err + } + if err := meta.SetList(list, matchingObjs); err != nil { + return nil, err + } + return list.DeepCopyObject(), nil +} + +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + t.lock.Lock() + defer t.lock.Unlock() + + fakewatcher := watch.NewRaceFreeFake() + + if _, exists := t.watchers[gvr]; !exists { + t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) + } + t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) + return fakewatcher, nil +} + +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { + errNotFound := errors.NewNotFound(gvr.GroupResource(), name) + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return nil, errNotFound + } + + matchingObjs, err := filterByNamespaceAndName(objs, ns, name) + if err != nil { + return nil, err + } + if len(matchingObjs) == 0 { + return nil, errNotFound + } + if len(matchingObjs) > 1 { + return nil, fmt.Errorf("more than one object matched gvr %s, ns: %q name: %q", gvr, ns, name) + } + + // Only one object should match in the tracker if it works + // correctly, as Add/Update methods enforce kind/namespace/name + // uniqueness. 
+ obj := matchingObjs[0].DeepCopyObject() + if status, ok := obj.(*metav1.Status); ok { + if status.Status != metav1.StatusSuccess { + return nil, &errors.StatusError{ErrStatus: *status} + } + } + + return obj, nil +} + +func (t *tracker) Add(obj runtime.Object) error { + if meta.IsListType(obj) { + return t.addList(obj, false) + } + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + gvks, _, err := t.scheme.ObjectKinds(obj) + if err != nil { + return err + } + if len(gvks) == 0 { + return fmt.Errorf("no registered kinds for %v", obj) + } + for _, gvk := range gvks { + // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The + // actual registration in apiserver can specify arbitrary route for a + // gvk. If a test uses such objects, it cannot preset the tracker with + // objects via Add(). Instead, it should trigger the Create() function + // of the tracker, where an arbitrary gvr can be specified. + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + // Resource doesn't have the concept of "__internal" version, just set it to "". + if gvr.Version == runtime.APIVersionInternal { + gvr.Version = "" + } + + err := t.add(gvr, obj, objMeta.GetNamespace(), false) + if err != nil { + return err + } + } + return nil +} + +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, false) +} + +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { + watches := []*watch.RaceFreeFakeWatcher{} + if t.watchers[gvr] != nil { + if w := t.watchers[gvr][ns]; w != nil { + watches = append(watches, w...) + } + if w := t.watchers[gvr][""]; w != nil { + watches = append(watches, w...) + } + } + return watches +} + +func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { + t.lock.Lock() + defer t.lock.Unlock() + + gr := gvr.GroupResource() + + // To avoid the object from being accidentally modified by caller + // after it's been added to the tracker, we always store the deep + // copy. + obj = obj.DeepCopyObject() + + newMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + + // Propagate namespace to the new object if hasn't already been set. + if len(newMeta.GetNamespace()) == 0 { + newMeta.SetNamespace(ns) + } + + if ns != newMeta.GetNamespace() { + msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) + return errors.NewBadRequest(msg) + } + + for i, existingObj := range t.objects[gvr] { + oldMeta, err := meta.Accessor(existingObj) + if err != nil { + return err + } + if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() { + if replaceExisting { + for _, w := range t.getWatches(gvr, ns) { + w.Modify(obj) + } + t.objects[gvr][i] = obj + return nil + } + return errors.NewAlreadyExists(gr, newMeta.GetName()) + } + } + + if replaceExisting { + // Tried to update but no matching object was found. 
+ return errors.NewNotFound(gr, newMeta.GetName()) + } + + t.objects[gvr] = append(t.objects[gvr], obj) + + for _, w := range t.getWatches(gvr, ns) { + w.Add(obj) + } + + return nil +} + +func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { + list, err := meta.ExtractList(obj) + if err != nil { + return err + } + errs := runtime.DecodeList(list, t.decoder) + if len(errs) > 0 { + return errs[0] + } + for _, obj := range list { + if err := t.Add(obj); err != nil { + return err + } + } + return nil +} + +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { + t.lock.Lock() + defer t.lock.Unlock() + + found := false + + for i, existingObj := range t.objects[gvr] { + objMeta, err := meta.Accessor(existingObj) + if err != nil { + return err + } + if objMeta.GetNamespace() == ns && objMeta.GetName() == name { + obj := t.objects[gvr][i] + t.objects[gvr] = append(t.objects[gvr][:i], t.objects[gvr][i+1:]...) + for _, w := range t.getWatches(gvr, ns) { + w.Delete(obj) + } + found = true + break + } + } + + if found { + return nil + } + + return errors.NewNotFound(gvr.GroupResource(), name) +} + +// filterByNamespaceAndName returns all objects in the collection that +// match provided namespace and name. Empty namespace matches +// non-namespaced objects. +func filterByNamespaceAndName(objs []runtime.Object, ns, name string) ([]runtime.Object, error) { + var res []runtime.Object + + for _, obj := range objs { + acc, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + if ns != "" && acc.GetNamespace() != ns { + continue + } + if name != "" && acc.GetName() != name { + continue + } + res = append(res, obj) + } + + return res, nil +} + +func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc { + return func(action Action) (bool, watch.Interface, error) { + return true, watchInterface, err + } +} + +// SimpleReactor is a Reactor. Each reaction function is attached to a given verb,resource tuple. "*" in either field matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions +type SimpleReactor struct { + Verb string + Resource string + + Reaction ReactionFunc +} + +func (r *SimpleReactor) Handles(action Action) bool { + verbCovers := r.Verb == "*" || r.Verb == action.GetVerb() + if !verbCovers { + return false + } + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) { + return r.Reaction(action) +} + +// SimpleWatchReactor is a WatchReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions +type SimpleWatchReactor struct { + Resource string + + Reaction WatchReactionFunc +} + +func (r *SimpleWatchReactor) Handles(action Action) bool { + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) { + return r.Reaction(action) +} + +// SimpleProxyReactor is a ProxyReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. 
This allows for easier composition of reaction functions. +type SimpleProxyReactor struct { + Resource string + + Reaction ProxyReactionFunc +} + +func (r *SimpleProxyReactor) Handles(action Action) bool { + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) { + return r.Reaction(action) +}
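Note (illustrative, not part of the patch): the newly vendored k8s.io/client-go/testing package above is the machinery that fake clients are built on — reactors are matched against recorded actions, and the action history stays available for assertions. Below is a minimal sketch of how that flow is typically exercised in a unit test; the pod name, namespace, and reactor behaviour are made-up values for illustration, and the snippet assumes k8s.io/api/core/v1 is available alongside the vendored packages.

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	k8stesting "k8s.io/client-go/testing"
)

// demoFakeReactor shows the Fake/Reactor flow: register a reactor for a
// verb/resource pair, invoke an action, and inspect the recorded history.
func demoFakeReactor() error {
	f := &k8stesting.Fake{}

	// Serve a canned Pod for every "get" on "pods"; returning handled=true
	// stops the reaction chain. The returned object is fabricated here
	// purely for the example.
	f.AddReactor("get", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) {
		get := action.(k8stesting.GetAction)
		pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
			Namespace: get.GetNamespace(),
			Name:      get.GetName(),
		}}
		return true, pod, nil
	})

	gvr := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	obj, err := f.Invokes(k8stesting.NewGetAction(gvr, "default", "example-pod"), nil)
	if err != nil {
		return err
	}

	// Every call is recorded, so a test can assert on the action history.
	fmt.Printf("got %T, %d action(s) recorded\n", obj, len(f.Actions()))
	return nil
}

In practice this is usually wired up through a generated fake clientset (which embeds testing.Fake) rather than a bare Fake, but the reactor and action-recording semantics are the same.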