From cfe44e869af913e110e5ca4115f35a35bd8e2353 Mon Sep 17 00:00:00 2001
From: Evgeniy Kulikov
Date: Mon, 29 Jan 2018 15:59:23 +0300
Subject: [PATCH] Cleanup vendor (#5726)

* Cleanup vendor
---
 Gopkg.toml | 26 +-
 hack/clean_vendor.sh | 2 +
 .../apache/thrift/contrib/fb303/LICENSE | 16 +
 .../github.com/apache/thrift/lib/hs/LICENSE | 202 ++
 vendor/github.com/coreos/etcd/auth/doc.go | 16 -
 vendor/github.com/coreos/etcd/auth/jwt.go | 137 --
 .../coreos/etcd/auth/range_perm_cache.go | 133 --
 .../coreos/etcd/auth/simple_token.go | 220 ---
 vendor/github.com/coreos/etcd/auth/store.go | 1059 -----------
 .../coreos/etcd/client/auth_role.go | 237 ---
 .../coreos/etcd/client/auth_user.go | 320 ----
 .../coreos/etcd/client/cancelreq.go | 18 -
 .../github.com/coreos/etcd/client/client.go | 704 -------
 .../coreos/etcd/client/cluster_error.go | 37 -
 vendor/github.com/coreos/etcd/client/curl.go | 70 -
 .../github.com/coreos/etcd/client/discover.go | 40 -
 vendor/github.com/coreos/etcd/client/doc.go | 73 -
 .../coreos/etcd/client/keys.generated.go | 1087 -----------
 vendor/github.com/coreos/etcd/client/keys.go | 682 -------
 .../github.com/coreos/etcd/client/members.go | 304 ---
 vendor/github.com/coreos/etcd/client/util.go | 53 -
 .../coreos/etcd/etcdserver/api/capability.go | 86 -
 .../coreos/etcd/etcdserver/api/cluster.go | 41 -
 .../coreos/etcd/etcdserver/api/doc.go | 16 -
 .../coreos/etcd/etcdserver/api/v3rpc/auth.go | 157 --
 .../coreos/etcd/etcdserver/api/v3rpc/codec.go | 34 -
 .../coreos/etcd/etcdserver/api/v3rpc/grpc.go | 71 -
 .../etcd/etcdserver/api/v3rpc/header.go | 46 -
 .../etcd/etcdserver/api/v3rpc/interceptor.go | 144 --
 .../coreos/etcd/etcdserver/api/v3rpc/key.go | 259 ---
 .../coreos/etcd/etcdserver/api/v3rpc/lease.go | 133 --
 .../etcd/etcdserver/api/v3rpc/maintenance.go | 190 --
 .../etcd/etcdserver/api/v3rpc/member.go | 103 -
 .../etcd/etcdserver/api/v3rpc/metrics.go | 38 -
 .../coreos/etcd/etcdserver/api/v3rpc/quota.go | 89 -
 .../coreos/etcd/etcdserver/api/v3rpc/util.go | 118 --
 .../coreos/etcd/etcdserver/api/v3rpc/watch.go | 446 -----
 .../coreos/etcd/etcdserver/apply.go | 878 ---------
 .../coreos/etcd/etcdserver/apply_auth.go | 196 --
 .../coreos/etcd/etcdserver/apply_v2.go | 140 --
 .../coreos/etcd/etcdserver/backend.go | 81 -
 .../coreos/etcd/etcdserver/cluster_util.go | 258 ---
 .../coreos/etcd/etcdserver/config.go | 209 ---
 .../etcd/etcdserver/consistent_index.go | 33 -
 .../github.com/coreos/etcd/etcdserver/doc.go | 16 -
 .../coreos/etcd/etcdserver/errors.go | 46 -
 .../coreos/etcd/etcdserver/metrics.go | 102 -
 .../coreos/etcd/etcdserver/quota.go | 121 --
 .../github.com/coreos/etcd/etcdserver/raft.go | 594 ------
 .../coreos/etcd/etcdserver/server.go | 1664 -----------------
 .../coreos/etcd/etcdserver/snapshot_merge.go | 73 -
 .../coreos/etcd/etcdserver/storage.go | 98 -
 .../github.com/coreos/etcd/etcdserver/util.go | 97 -
 .../coreos/etcd/etcdserver/v2_server.go | 125 --
 .../coreos/etcd/etcdserver/v3_server.go | 686 -------
 vendor/github.com/coreos/etcd/main.go | 29 -
 vendor/github.com/coreos/etcd/mvcc/doc.go | 16 -
 vendor/github.com/coreos/etcd/mvcc/index.go | 219 ---
 .../github.com/coreos/etcd/mvcc/key_index.go | 332 ----
 vendor/github.com/coreos/etcd/mvcc/kv.go | 147 --
 vendor/github.com/coreos/etcd/mvcc/kv_view.go | 53 -
 vendor/github.com/coreos/etcd/mvcc/kvstore.go | 459 -----
 .../coreos/etcd/mvcc/kvstore_compaction.go | 66 -
 .../coreos/etcd/mvcc/kvstore_txn.go | 253 ---
 vendor/github.com/coreos/etcd/mvcc/metrics.go | 174 --
 .../coreos/etcd/mvcc/metrics_txn.go | 67 -
 .../github.com/coreos/etcd/mvcc/revision.go | 67 -
 vendor/github.com/coreos/etcd/mvcc/util.go | 56 -
 .../coreos/etcd/mvcc/watchable_store.go | 522 ------
 .../coreos/etcd/mvcc/watchable_store_txn.go | 53 -
 vendor/github.com/coreos/etcd/mvcc/watcher.go | 171 --
 .../coreos/etcd/mvcc/watcher_group.go | 283 ---
 vendor/github.com/coreos/gofail/gofail.go | 195 --
 vendor/github.com/cznic/golex/doc.go | 250 ---
 vendor/github.com/cznic/golex/main.go | 152 --
 vendor/github.com/cznic/golex/render.go | 280 ---
 .../cznic/mathutil/mersenne/LICENSE | 27 +
 vendor/github.com/cznic/parser/nquads/LICENSE | 27 +
 .../golang/protobuf/protoc-gen-go/doc.go | 51 -
 .../protobuf/protoc-gen-go/link_grpc.go | 34 -
 .../golang/protobuf/protoc-gen-go/main.go | 98 -
 .../prometheus/client_model/ruby/LICENSE | 201 ++
 vendor/golang.org/x/text/doc.go | 13 -
 vendor/golang.org/x/text/gen.go | 292 ---
 vendor/golang.org/x/text/internal/gen.go | 52 -
 vendor/golang.org/x/text/internal/internal.go | 51 -
 vendor/golang.org/x/text/internal/match.go | 67 -
 vendor/golang.org/x/text/internal/tables.go | 116 --
 vendor/golang.org/x/text/secure/doc.go | 6 -
 vendor/golang.org/x/text/unicode/doc.go | 8 -
 .../googleapis/api/authorization_config.pb.go | 80 -
 .../googleapis/api/experimental.pb.go | 56 -
 vendor/google.golang.org/genproto/regen.go | 123 --
 .../grpc/grpclb/grpc_lb_v1/doc.go | 21 -
 .../google.golang.org/grpc/health/health.go | 72 -
 95 files changed, 480 insertions(+), 17583 deletions(-)
 create mode 100644 vendor/github.com/apache/thrift/contrib/fb303/LICENSE
 create mode 100644 vendor/github.com/apache/thrift/lib/hs/LICENSE
 delete mode 100644 vendor/github.com/coreos/etcd/auth/doc.go
 delete mode 100644 vendor/github.com/coreos/etcd/auth/jwt.go
 delete mode 100644 vendor/github.com/coreos/etcd/auth/range_perm_cache.go
 delete mode 100644 vendor/github.com/coreos/etcd/auth/simple_token.go
 delete mode 100644 vendor/github.com/coreos/etcd/auth/store.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/auth_role.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/auth_user.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/cancelreq.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/client.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/cluster_error.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/curl.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/discover.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/doc.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/keys.generated.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/keys.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/members.go
 delete mode 100644 vendor/github.com/coreos/etcd/client/util.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/capability.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/doc.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/backend.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/config.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/consistent_index.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/doc.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/errors.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/metrics.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/quota.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/raft.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/server.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/storage.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/util.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/v2_server.go
 delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/v3_server.go
 delete mode 100644 vendor/github.com/coreos/etcd/main.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/doc.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/index.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/key_index.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kv.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kv_view.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/metrics.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/metrics_txn.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/revision.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/util.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/watchable_store.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/watcher.go
 delete mode 100644 vendor/github.com/coreos/etcd/mvcc/watcher_group.go
 delete mode 100644 vendor/github.com/coreos/gofail/gofail.go
 delete mode 100644 vendor/github.com/cznic/golex/doc.go
 delete mode 100644 vendor/github.com/cznic/golex/main.go
 delete mode 100644 vendor/github.com/cznic/golex/render.go
 create mode 100644 vendor/github.com/cznic/mathutil/mersenne/LICENSE
 create mode 100644 vendor/github.com/cznic/parser/nquads/LICENSE
 delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
 delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
 delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/main.go
 create mode 100644 vendor/github.com/prometheus/client_model/ruby/LICENSE
 delete mode 100644 vendor/golang.org/x/text/doc.go
 delete mode 100644 vendor/golang.org/x/text/gen.go
 delete mode 100644 vendor/golang.org/x/text/internal/gen.go
 delete mode 100644 vendor/golang.org/x/text/internal/internal.go
 delete mode 100644 vendor/golang.org/x/text/internal/match.go
 delete mode 100644 vendor/golang.org/x/text/internal/tables.go
 delete mode 100644 vendor/golang.org/x/text/secure/doc.go
 delete mode 100644 vendor/golang.org/x/text/unicode/doc.go
 delete mode 100644 vendor/google.golang.org/genproto/googleapis/api/authorization_config.pb.go
 delete mode 100644 vendor/google.golang.org/genproto/googleapis/api/experimental.pb.go
 delete mode 100644 vendor/google.golang.org/genproto/regen.go
 delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go
 delete mode 100644 vendor/google.golang.org/grpc/health/health.go

diff --git a/Gopkg.toml b/Gopkg.toml
index 16b67dad6e479..15fd687d47cdf 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -1,26 +1,10 @@
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-# name = "github.com/user/project"
-# version = "1.0.0"
-#
-# [[constraint]]
-# name = "github.com/user/project2"
-# branch = "dev"
-# source = "github.com/myfork/project2"
-#
-# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
-
 ignored = ["github.com/coreos/etcd/etcdserver", "github.com/coreos/etcd/mvcc", "github.com/coreos/etcd/auth"]
 
+[prune]
+ non-go = true
+ go-tests = true
+ unused-packages = true
+
 [[constraint]]
 branch = "master"
 name = "github.com/BurntSushi/toml"
diff --git a/hack/clean_vendor.sh b/hack/clean_vendor.sh
index 676ee0c907886..81ba6b127efdc 100644
--- a/hack/clean_vendor.sh
+++ b/hack/clean_vendor.sh
@@ -2,3 +2,5 @@ find vendor \( -type f -or -type l \) -not -name "*.go" -not -name "LICENSE" -n
 # delete all test files
 find vendor -type f -name "*_generated.go" | xargs -I {} rm {}
 find vendor -type f -name "*_test.go" | xargs -I {} rm {}
+find vendor -type d -name "_vendor" | xargs -I {} rm -rf {}
+find vendor -type d -empty | xargs -I {} rm -rf {}
\ No newline at end of file
diff --git a/vendor/github.com/apache/thrift/contrib/fb303/LICENSE b/vendor/github.com/apache/thrift/contrib/fb303/LICENSE
new file mode 100644
index 0000000000000..4eacb64317973
--- /dev/null
+++ b/vendor/github.com/apache/thrift/contrib/fb303/LICENSE
@@ -0,0 +1,16 @@
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
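The substance of the change is in the two hunks above: the new [prune] block in Gopkg.toml tells dep to drop non-Go files, test files, and unused packages when populating vendor/, and the two find commands appended to hack/clean_vendor.sh remove nested "_vendor" trees and then any directories left empty by the cleanup. A minimal standalone sketch of that last step, runnable from the repository root; the -prune/-depth flags and the rmdir-based variant are illustrative choices, not the script's exact commands:

#!/bin/sh
# Sketch only: prune nested "_vendor" trees, then drop empty directories.
# -depth visits children before parents, so a directory that becomes empty
# once its children are removed is caught in the same pass.
set -eu
find vendor -type d -name "_vendor" -prune -exec rm -rf {} +
find vendor -depth -type d -empty -exec rmdir {} \;

The script itself pipes the same find output through xargs instead; without -depth a single pass can leave newly emptied parent directories behind, which a rerun of the script would then pick up.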
diff --git a/vendor/github.com/apache/thrift/lib/hs/LICENSE b/vendor/github.com/apache/thrift/lib/hs/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/hs/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/github.com/coreos/etcd/auth/doc.go deleted file mode 100644 index 72741a107745c..0000000000000 --- a/vendor/github.com/coreos/etcd/auth/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package auth provides client role authentication for accessing keys in etcd. -package auth diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go deleted file mode 100644 index 214ae48c83a92..0000000000000 --- a/vendor/github.com/coreos/etcd/auth/jwt.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import ( - "crypto/rsa" - "io/ioutil" - - jwt "github.com/dgrijalva/jwt-go" - "golang.org/x/net/context" -) - -type tokenJWT struct { - signMethod string - signKey *rsa.PrivateKey - verifyKey *rsa.PublicKey -} - -func (t *tokenJWT) enable() {} -func (t *tokenJWT) disable() {} -func (t *tokenJWT) invalidateUser(string) {} -func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } - -func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { - // rev isn't used in JWT, it is only used in simple token - var ( - username string - revision uint64 - ) - - parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { - return t.verifyKey, nil - }) - - switch err.(type) { - case nil: - if !parsed.Valid { - plog.Warningf("invalid jwt token: %s", token) - return nil, false - } - - claims := parsed.Claims.(jwt.MapClaims) - - username = claims["username"].(string) - revision = uint64(claims["revision"].(float64)) - default: - plog.Warningf("failed to parse jwt token: %s", err) - return nil, false - } - - return &AuthInfo{Username: username, Revision: revision}, true -} - -func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { - // Future work: let a jwt token include permission information would be useful for - // permission checking in proxy side. - tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), - jwt.MapClaims{ - "username": username, - "revision": revision, - }) - - token, err := tk.SignedString(t.signKey) - if err != nil { - plog.Debugf("failed to sign jwt token: %s", err) - return "", err - } - - plog.Debugf("jwt token: %s", token) - - return token, err -} - -func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { - for k, v := range opts { - switch k { - case "sign-method": - jwtSignMethod = v - case "pub-key": - jwtPubKeyPath = v - case "priv-key": - jwtPrivKeyPath = v - default: - plog.Errorf("unknown token specific option: %s", k) - return "", "", "", ErrInvalidAuthOpts - } - } - - return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil -} - -func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { - jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - t := &tokenJWT{} - - t.signMethod = jwtSignMethod - - verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) - if err != nil { - plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) - return nil, err - } - t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) - if err != nil { - plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) - return nil, err - } - - signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) - if err != nil { - plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) - return nil, err - } - t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) - if err != nil { - plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) - return nil, err - } - - return t, nil -} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go deleted file mode 100644 index 691b65ba38e74..0000000000000 --- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "github.com/coreos/etcd/auth/authpb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/adt" -) - -func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { - user := getUser(tx, userName) - if user == nil { - plog.Errorf("invalid user name %s", userName) - return nil - } - - readPerms := &adt.IntervalTree{} - writePerms := &adt.IntervalTree{} - - for _, roleName := range user.Roles { - role := getRole(tx, roleName) - if role == nil { - continue - } - - for _, perm := range role.KeyPermission { - var ivl adt.Interval - var rangeEnd []byte - - if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { - rangeEnd = perm.RangeEnd - } - - if len(perm.RangeEnd) != 0 { - ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd) - } else { - ivl = adt.NewBytesAffinePoint(perm.Key) - } - - switch perm.PermType { - case authpb.READWRITE: - readPerms.Insert(ivl, struct{}{}) - writePerms.Insert(ivl, struct{}{}) - - case authpb.READ: - readPerms.Insert(ivl, struct{}{}) - - case authpb.WRITE: - writePerms.Insert(ivl, struct{}{}) - } - } - } - - return &unifiedRangePermissions{ - readPerms: readPerms, - writePerms: writePerms, - } -} - -func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - if len(rangeEnd) == 1 && rangeEnd[0] == 0 { - rangeEnd = nil - } - - ivl := adt.NewBytesAffineInterval(key, rangeEnd) - switch permtyp { - case authpb.READ: - return cachedPerms.readPerms.Contains(ivl) - case authpb.WRITE: - return cachedPerms.writePerms.Contains(ivl) - default: - plog.Panicf("unknown auth type: %v", permtyp) - } - return false -} - -func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { - pt := adt.NewBytesAffinePoint(key) - switch permtyp { - case authpb.READ: - return cachedPerms.readPerms.Intersects(pt) - case authpb.WRITE: - return cachedPerms.writePerms.Intersects(pt) - default: - plog.Panicf("unknown auth type: %v", permtyp) - } - return false -} - -func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - // assumption: tx is Lock()ed - _, ok := as.rangePermCache[userName] - if !ok { - perms := getMergedPerms(tx, userName) - if perms == nil { - plog.Errorf("failed to create a unified permission of user %s", userName) - return false - } - as.rangePermCache[userName] = perms - } - - if len(rangeEnd) == 0 { - return checkKeyPoint(as.rangePermCache[userName], key, permtyp) - } - - return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) -} - -func (as *authStore) clearCachedPerm() { - as.rangePermCache = make(map[string]*unifiedRangePermissions) -} - -func (as *authStore) invalidateCachedPerm(userName string) { - delete(as.rangePermCache, userName) -} - -type unifiedRangePermissions struct { - readPerms *adt.IntervalTree - writePerms *adt.IntervalTree -} diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go 
b/vendor/github.com/coreos/etcd/auth/simple_token.go deleted file mode 100644 index 94d92a115e24c..0000000000000 --- a/vendor/github.com/coreos/etcd/auth/simple_token.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -// CAUTION: This randum number based token mechanism is only for testing purpose. -// JWT based mechanism will be added in the near future. - -import ( - "crypto/rand" - "fmt" - "math/big" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/context" -) - -const ( - letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - defaultSimpleTokenLength = 16 -) - -// var for testing purposes -var ( - simpleTokenTTL = 5 * time.Minute - simpleTokenTTLResolution = 1 * time.Second -) - -type simpleTokenTTLKeeper struct { - tokens map[string]time.Time - donec chan struct{} - stopc chan struct{} - deleteTokenFunc func(string) - mu *sync.Mutex -} - -func (tm *simpleTokenTTLKeeper) stop() { - select { - case tm.stopc <- struct{}{}: - case <-tm.donec: - } - <-tm.donec -} - -func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) { - tm.tokens[token] = time.Now().Add(simpleTokenTTL) -} - -func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) { - if _, ok := tm.tokens[token]; ok { - tm.tokens[token] = time.Now().Add(simpleTokenTTL) - } -} - -func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) { - delete(tm.tokens, token) -} - -func (tm *simpleTokenTTLKeeper) run() { - tokenTicker := time.NewTicker(simpleTokenTTLResolution) - defer func() { - tokenTicker.Stop() - close(tm.donec) - }() - for { - select { - case <-tokenTicker.C: - nowtime := time.Now() - tm.mu.Lock() - for t, tokenendtime := range tm.tokens { - if nowtime.After(tokenendtime) { - tm.deleteTokenFunc(t) - delete(tm.tokens, t) - } - } - tm.mu.Unlock() - case <-tm.stopc: - return - } - } -} - -type tokenSimple struct { - indexWaiter func(uint64) <-chan struct{} - simpleTokenKeeper *simpleTokenTTLKeeper - simpleTokensMu sync.Mutex - simpleTokens map[string]string // token -> username -} - -func (t *tokenSimple) genTokenPrefix() (string, error) { - ret := make([]byte, defaultSimpleTokenLength) - - for i := 0; i < defaultSimpleTokenLength; i++ { - bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) - if err != nil { - return "", err - } - - ret[i] = letters[bInt.Int64()] - } - - return string(ret), nil -} - -func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { - t.simpleTokensMu.Lock() - _, ok := t.simpleTokens[token] - if ok { - plog.Panicf("token %s is alredy used", token) - } - - t.simpleTokens[token] = username - t.simpleTokenKeeper.addSimpleToken(token) - t.simpleTokensMu.Unlock() -} - -func (t *tokenSimple) invalidateUser(username string) { - if t.simpleTokenKeeper == nil { - return - } - t.simpleTokensMu.Lock() - for token, name := range t.simpleTokens { - if strings.Compare(name, username) == 0 { - delete(t.simpleTokens, token) - 
t.simpleTokenKeeper.deleteSimpleToken(token) - } - } - t.simpleTokensMu.Unlock() -} - -func (t *tokenSimple) enable() { - delf := func(tk string) { - if username, ok := t.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) - delete(t.simpleTokens, tk) - } - } - t.simpleTokenKeeper = &simpleTokenTTLKeeper{ - tokens: make(map[string]time.Time), - donec: make(chan struct{}), - stopc: make(chan struct{}), - deleteTokenFunc: delf, - mu: &t.simpleTokensMu, - } - go t.simpleTokenKeeper.run() -} - -func (t *tokenSimple) disable() { - t.simpleTokensMu.Lock() - tk := t.simpleTokenKeeper - t.simpleTokenKeeper = nil - t.simpleTokens = make(map[string]string) // invalidate all tokens - t.simpleTokensMu.Unlock() - if tk != nil { - tk.stop() - } -} - -func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) { - if !t.isValidSimpleToken(ctx, token) { - return nil, false - } - t.simpleTokensMu.Lock() - username, ok := t.simpleTokens[token] - if ok && t.simpleTokenKeeper != nil { - t.simpleTokenKeeper.resetSimpleToken(token) - } - t.simpleTokensMu.Unlock() - return &AuthInfo{Username: username, Revision: revision}, ok -} - -func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { - // rev isn't used in simple token, it is only used in JWT - index := ctx.Value("index").(uint64) - simpleToken := ctx.Value("simpleToken").(string) - token := fmt.Sprintf("%s.%d", simpleToken, index) - t.assignSimpleTokenToUser(username, token) - - return token, nil -} - -func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool { - splitted := strings.Split(token, ".") - if len(splitted) != 2 { - return false - } - index, err := strconv.Atoi(splitted[1]) - if err != nil { - return false - } - - select { - case <-t.indexWaiter(uint64(index)): - return true - case <-ctx.Done(): - } - - return false -} - -func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { - return &tokenSimple{ - simpleTokens: make(map[string]string), - indexWaiter: indexWaiter, - } -} diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go deleted file mode 100644 index 3fac7f5a6fd7a..0000000000000 --- a/vendor/github.com/coreos/etcd/auth/store.go +++ /dev/null @@ -1,1059 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import ( - "bytes" - "encoding/binary" - "errors" - "sort" - "strings" - "sync" - "sync/atomic" - - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/pkg/capnslog" - "golang.org/x/crypto/bcrypt" - "golang.org/x/net/context" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -var ( - enableFlagKey = []byte("authEnabled") - authEnabled = []byte{1} - authDisabled = []byte{0} - - revisionKey = []byte("authRevision") - - authBucketName = []byte("auth") - authUsersBucketName = []byte("authUsers") - authRolesBucketName = []byte("authRoles") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth") - - ErrRootUserNotExist = errors.New("auth: root user does not exist") - ErrRootRoleNotExist = errors.New("auth: root user does not have root role") - ErrUserAlreadyExist = errors.New("auth: user already exists") - ErrUserEmpty = errors.New("auth: user name is empty") - ErrUserNotFound = errors.New("auth: user not found") - ErrRoleAlreadyExist = errors.New("auth: role already exists") - ErrRoleNotFound = errors.New("auth: role not found") - ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password") - ErrPermissionDenied = errors.New("auth: permission denied") - ErrRoleNotGranted = errors.New("auth: role is not granted to the user") - ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role") - ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") - ErrAuthOldRevision = errors.New("auth: revision in header is old") - ErrInvalidAuthToken = errors.New("auth: invalid auth token") - ErrInvalidAuthOpts = errors.New("auth: invalid auth options") - ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") - - // BcryptCost is the algorithm cost / strength for hashing auth passwords - BcryptCost = bcrypt.DefaultCost -) - -const ( - rootUser = "root" - rootRole = "root" - - revBytesLen = 8 -) - -type AuthInfo struct { - Username string - Revision uint64 -} - -type AuthStore interface { - // AuthEnable turns on the authentication feature - AuthEnable() error - - // AuthDisable turns off the authentication feature - AuthDisable() - - // Authenticate does authentication based on given user name and password - Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) - - // Recover recovers the state of auth store from the given backend - Recover(b backend.Backend) - - // UserAdd adds a new user - UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - - // UserDelete deletes a user - UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user - UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - - // UserGrantRole grants a role to the user - UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - - // UserGet gets the detailed information of a users - UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - - // UserRevokeRole revokes a role of a user - UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role - RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role - RoleGrantPermission(r 
*pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - - // RoleGet gets the detailed information of a role - RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - - // RoleRevokePermission gets the detailed information of a role - RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - - // RoleDelete gets the detailed information of a role - RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - - // UserList gets a list of all users - UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - - // RoleList gets a list of all roles - RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) - - // IsPutPermitted checks put permission of the user - IsPutPermitted(authInfo *AuthInfo, key []byte) error - - // IsRangePermitted checks range permission of the user - IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error - - // IsDeleteRangePermitted checks delete-range permission of the user - IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error - - // IsAdminPermitted checks admin permission of the user - IsAdminPermitted(authInfo *AuthInfo) error - - // GenTokenPrefix produces a random string in a case of simple token - // in a case of JWT, it produces an empty string - GenTokenPrefix() (string, error) - - // Revision gets current revision of authStore - Revision() uint64 - - // CheckPassword checks a given pair of username and password is correct - CheckPassword(username, password string) (uint64, error) - - // Close does cleanup of AuthStore - Close() error - - // AuthInfoFromCtx gets AuthInfo from gRPC's context - AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) - - // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context - AuthInfoFromTLS(ctx context.Context) *AuthInfo -} - -type TokenProvider interface { - info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) - assign(ctx context.Context, username string, revision uint64) (string, error) - enable() - disable() - - invalidateUser(string) - genTokenPrefix() (string, error) -} - -type authStore struct { - // atomic operations; need 64-bit align, or 32-bit tests will crash - revision uint64 - - be backend.Backend - enabled bool - enabledMu sync.RWMutex - - rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - - tokenProvider TokenProvider -} - -func (as *authStore) AuthEnable() error { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if as.enabled { - plog.Noticef("Authentication already enabled") - return nil - } - b := as.be - tx := b.BatchTx() - tx.Lock() - defer func() { - tx.Unlock() - b.ForceCommit() - }() - - u := getUser(tx, rootUser) - if u == nil { - return ErrRootUserNotExist - } - - if !hasRootRole(u) { - return ErrRootRoleNotExist - } - - tx.UnsafePut(authBucketName, enableFlagKey, authEnabled) - - as.enabled = true - as.tokenProvider.enable() - - as.rangePermCache = make(map[string]*unifiedRangePermissions) - - as.setRevision(getRevision(tx)) - - plog.Noticef("Authentication enabled") - - return nil -} - -func (as *authStore) AuthDisable() { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if !as.enabled { - return - } - b := as.be - tx := b.BatchTx() - tx.Lock() - tx.UnsafePut(authBucketName, enableFlagKey, authDisabled) - as.commitRevision(tx) - tx.Unlock() - b.ForceCommit() - - as.enabled = false - as.tokenProvider.disable() - - plog.Noticef("Authentication disabled") -} - 
-func (as *authStore) Close() error { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if !as.enabled { - return nil - } - as.tokenProvider.disable() - return nil -} - -func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { - if !as.isAuthEnabled() { - return nil, ErrAuthNotEnabled - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, username) - if user == nil { - return nil, ErrAuthFailed - } - - // Password checking is already performed in the API layer, so we don't need to check for now. - // Staleness of password can be detected with OCC in the API layer, too. - - token, err := as.tokenProvider.assign(ctx, username, as.Revision()) - if err != nil { - return nil, err - } - - plog.Debugf("authorized %s, token is %s", username, token) - return &pb.AuthenticateResponse{Token: token}, nil -} - -func (as *authStore) CheckPassword(username, password string) (uint64, error) { - if !as.isAuthEnabled() { - return 0, ErrAuthNotEnabled - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, username) - if user == nil { - return 0, ErrAuthFailed - } - - if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { - plog.Noticef("authentication failed, invalid password for user %s", username) - return 0, ErrAuthFailed - } - - return getRevision(tx), nil -} - -func (as *authStore) Recover(be backend.Backend) { - enabled := false - as.be = be - tx := be.BatchTx() - tx.Lock() - _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - - as.setRevision(getRevision(tx)) - - tx.Unlock() - - as.enabledMu.Lock() - as.enabled = enabled - as.enabledMu.Unlock() -} - -func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - if len(r.Name) == 0 { - return nil, ErrUserEmpty - } - - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) - if err != nil { - plog.Errorf("failed to hash password: %s", err) - return nil, err - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user != nil { - return nil, ErrUserAlreadyExist - } - - newUser := &authpb.User{ - Name: []byte(r.Name), - Password: hashed, - } - - putUser(tx, newUser) - - as.commitRevision(tx) - - plog.Noticef("added a new user: %s", r.Name) - - return &pb.AuthUserAddResponse{}, nil -} - -func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 { - plog.Errorf("the user root must not be deleted") - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - delUser(tx, r.Name) - - as.commitRevision(tx) - - as.invalidateCachedPerm(r.Name) - as.tokenProvider.invalidateUser(r.Name) - - plog.Noticef("deleted a user: %s", r.Name) - - return &pb.AuthUserDeleteResponse{}, nil -} - -func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword() - // If the cost is too high, we should move the encryption to outside of the raft - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) - if err != nil { - plog.Errorf("failed to hash password: %s", err) - return nil, err - } - 
- tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - updatedUser := &authpb.User{ - Name: []byte(r.Name), - Roles: user.Roles, - Password: hashed, - } - - putUser(tx, updatedUser) - - as.commitRevision(tx) - - as.invalidateCachedPerm(r.Name) - as.tokenProvider.invalidateUser(r.Name) - - plog.Noticef("changed a password of a user: %s", r.Name) - - return &pb.AuthUserChangePasswordResponse{}, nil -} - -func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.User) - if user == nil { - return nil, ErrUserNotFound - } - - if r.Role != rootRole { - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - } - - idx := sort.SearchStrings(user.Roles, r.Role) - if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 { - plog.Warningf("user %s is already granted role %s", r.User, r.Role) - return &pb.AuthUserGrantRoleResponse{}, nil - } - - user.Roles = append(user.Roles, r.Role) - sort.Sort(sort.StringSlice(user.Roles)) - - putUser(tx, user) - - as.invalidateCachedPerm(r.User) - - as.commitRevision(tx) - - plog.Noticef("granted role %s to user %s", r.Role, r.User) - return &pb.AuthUserGrantRoleResponse{}, nil -} - -func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthUserGetResponse - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - resp.Roles = append(resp.Roles, user.Roles...) - return &resp, nil -} - -func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthUserListResponse - - users := getAllUsers(tx) - - for _, u := range users { - resp.Users = append(resp.Users, string(u.Name)) - } - - return &resp, nil -} - -func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be revoked from the user root") - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - updatedUser := &authpb.User{ - Name: user.Name, - Password: user.Password, - } - - for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { - updatedUser.Roles = append(updatedUser.Roles, role) - } - } - - if len(updatedUser.Roles) == len(user.Roles) { - return nil, ErrRoleNotGranted - } - - putUser(tx, updatedUser) - - as.invalidateCachedPerm(r.Name) - - as.commitRevision(tx) - - plog.Noticef("revoked role %s from user %s", r.Role, r.Name) - return &pb.AuthUserRevokeRoleResponse{}, nil -} - -func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthRoleGetResponse - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - resp.Perm = append(resp.Perm, role.KeyPermission...) 
- return &resp, nil -} - -func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthRoleListResponse - - roles := getAllRoles(tx) - - for _, r := range roles { - resp.Roles = append(resp.Roles, string(r.Name)) - } - - return &resp, nil -} - -func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - - updatedRole := &authpb.Role{ - Name: role.Name, - } - - for _, perm := range role.KeyPermission { - if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) { - updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm) - } - } - - if len(role.KeyPermission) == len(updatedRole.KeyPermission) { - return nil, ErrPermissionNotGranted - } - - putRole(tx, updatedRole) - - // TODO(mitake): currently single role update invalidates every cache - // It should be optimized. - as.clearCachedPerm() - - as.commitRevision(tx) - - plog.Noticef("revoked key %s from role %s", r.Key, r.Role) - return &pb.AuthRoleRevokePermissionResponse{}, nil -} - -func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - if as.enabled && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be deleted") - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - - delRole(tx, r.Role) - - users := getAllUsers(tx) - for _, user := range users { - updatedUser := &authpb.User{ - Name: user.Name, - Password: user.Password, - } - - for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { - updatedUser.Roles = append(updatedUser.Roles, role) - } - } - - if len(updatedUser.Roles) == len(user.Roles) { - continue - } - - putUser(tx, updatedUser) - - as.invalidateCachedPerm(string(user.Name)) - } - - as.commitRevision(tx) - - plog.Noticef("deleted role %s", r.Role) - return &pb.AuthRoleDeleteResponse{}, nil -} - -func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Name) - if role != nil { - return nil, ErrRoleAlreadyExist - } - - newRole := &authpb.Role{ - Name: []byte(r.Name), - } - - putRole(tx, newRole) - - as.commitRevision(tx) - - plog.Noticef("Role %s is created", r.Name) - - return &pb.AuthRoleAddResponse{}, nil -} - -func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { - return as.tokenProvider.info(ctx, token, as.Revision()) -} - -type permSlice []*authpb.Permission - -func (perms permSlice) Len() int { - return len(perms) -} - -func (perms permSlice) Less(i, j int) bool { - return bytes.Compare(perms[i].Key, perms[j].Key) < 0 -} - -func (perms permSlice) Swap(i, j int) { - perms[i], perms[j] = perms[j], perms[i] -} - -func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Name) - if role == nil { - return nil, ErrRoleNotFound - } - - idx := sort.Search(len(role.KeyPermission), func(i int) bool { - return bytes.Compare(role.KeyPermission[i].Key, 
[]byte(r.Perm.Key)) >= 0 - }) - - if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) { - // update existing permission - role.KeyPermission[idx].PermType = r.Perm.PermType - } else { - // append new permission to the role - newPerm := &authpb.Permission{ - Key: []byte(r.Perm.Key), - RangeEnd: []byte(r.Perm.RangeEnd), - PermType: r.Perm.PermType, - } - - role.KeyPermission = append(role.KeyPermission, newPerm) - sort.Sort(permSlice(role.KeyPermission)) - } - - putRole(tx, role) - - // TODO(mitake): currently single role update invalidates every cache - // It should be optimized. - as.clearCachedPerm() - - as.commitRevision(tx) - - plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) - - return &pb.AuthRoleGrantPermissionResponse{}, nil -} - -func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { - // TODO(mitake): this function would be costly so we need a caching mechanism - if !as.isAuthEnabled() { - return nil - } - - // only gets rev == 0 when passed AuthInfo{}; no user given - if revision == 0 { - return ErrUserEmpty - } - - if revision < as.Revision() { - return ErrAuthOldRevision - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, userName) - if user == nil { - plog.Errorf("invalid user name %s for permission checking", userName) - return ErrPermissionDenied - } - - // root role should have permission on all ranges - if hasRootRole(user) { - return nil - } - - if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) { - return nil - } - - return ErrPermissionDenied -} - -func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE) -} - -func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ) -} - -func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE) -} - -func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { - if !as.isAuthEnabled() { - return nil - } - if authInfo == nil { - return ErrUserEmpty - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - u := getUser(tx, authInfo.Username) - if u == nil { - return ErrUserNotFound - } - - if !hasRootRole(u) { - return ErrPermissionDenied - } - - return nil -} - -func getUser(tx backend.BatchTx, username string) *authpb.User { - _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0) - if len(vs) == 0 { - return nil - } - - user := &authpb.User{} - err := user.Unmarshal(vs[0]) - if err != nil { - plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) - } - return user -} - -func getAllUsers(tx backend.BatchTx) []*authpb.User { - _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - var users []*authpb.User - - for _, v := range vs { - user := &authpb.User{} - err := user.Unmarshal(v) - if err != nil { - plog.Panicf("failed to unmarshal user struct: %s", err) - } - - users = append(users, user) - } - - return users -} - -func putUser(tx backend.BatchTx, user 
*authpb.User) { - b, err := user.Marshal() - if err != nil { - plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) - } - tx.UnsafePut(authUsersBucketName, user.Name, b) -} - -func delUser(tx backend.BatchTx, username string) { - tx.UnsafeDelete(authUsersBucketName, []byte(username)) -} - -func getRole(tx backend.BatchTx, rolename string) *authpb.Role { - _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0) - if len(vs) == 0 { - return nil - } - - role := &authpb.Role{} - err := role.Unmarshal(vs[0]) - if err != nil { - plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err) - } - return role -} - -func getAllRoles(tx backend.BatchTx) []*authpb.Role { - _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - var roles []*authpb.Role - - for _, v := range vs { - role := &authpb.Role{} - err := role.Unmarshal(v) - if err != nil { - plog.Panicf("failed to unmarshal role struct: %s", err) - } - - roles = append(roles, role) - } - - return roles -} - -func putRole(tx backend.BatchTx, role *authpb.Role) { - b, err := role.Marshal() - if err != nil { - plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) - } - - tx.UnsafePut(authRolesBucketName, []byte(role.Name), b) -} - -func delRole(tx backend.BatchTx, rolename string) { - tx.UnsafeDelete(authRolesBucketName, []byte(rolename)) -} - -func (as *authStore) isAuthEnabled() bool { - as.enabledMu.RLock() - defer as.enabledMu.RUnlock() - return as.enabled -} - -func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { - tx := be.BatchTx() - tx.Lock() - - tx.UnsafeCreateBucket(authBucketName) - tx.UnsafeCreateBucket(authUsersBucketName) - tx.UnsafeCreateBucket(authRolesBucketName) - - enabled := false - _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - - as := &authStore{ - be: be, - revision: getRevision(tx), - enabled: enabled, - rangePermCache: make(map[string]*unifiedRangePermissions), - tokenProvider: tp, - } - - if enabled { - as.tokenProvider.enable() - } - - if as.Revision() == 0 { - as.commitRevision(tx) - } - - tx.Unlock() - be.ForceCommit() - - return as -} - -func hasRootRole(u *authpb.User) bool { - for _, r := range u.Roles { - if r == rootRole { - return true - } - } - return false -} - -func (as *authStore) commitRevision(tx backend.BatchTx) { - atomic.AddUint64(&as.revision, 1) - revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.Revision()) - tx.UnsafePut(authBucketName, revisionKey, revBytes) -} - -func getRevision(tx backend.BatchTx) uint64 { - _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0) - if len(vs) != 1 { - // this can happen in the initialization phase - return 0 - } - - return binary.BigEndian.Uint64(vs[0]) -} - -func (as *authStore) setRevision(rev uint64) { - atomic.StoreUint64(&as.revision, rev) -} - -func (as *authStore) Revision() uint64 { - return atomic.LoadUint64(&as.revision) -} - -func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { - peer, ok := peer.FromContext(ctx) - if !ok || peer == nil || peer.AuthInfo == nil { - return nil - } - - tlsInfo := peer.AuthInfo.(credentials.TLSInfo) - for _, chains := range tlsInfo.State.VerifiedChains { - for _, chain := range chains { - cn := chain.Subject.CommonName - plog.Debugf("found common name %s", cn) - - return &AuthInfo{ - Username: cn, - Revision: 
as.Revision(), - } - } - } - - return nil -} - -func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, nil - } - - ts, tok := md["token"] - if !tok { - return nil, nil - } - - token := ts[0] - authInfo, uok := as.authInfoFromToken(ctx, token) - if !uok { - plog.Warningf("invalid auth token: %s", token) - return nil, ErrInvalidAuthToken - } - return authInfo, nil -} - -func (as *authStore) GenTokenPrefix() (string, error) { - return as.tokenProvider.genTokenPrefix() -} - -func decomposeOpts(optstr string) (string, map[string]string, error) { - opts := strings.Split(optstr, ",") - tokenType := opts[0] - - typeSpecificOpts := make(map[string]string) - for i := 1; i < len(opts); i++ { - pair := strings.Split(opts[i], "=") - - if len(pair) != 2 { - plog.Errorf("invalid token specific option: %s", optstr) - return "", nil, ErrInvalidAuthOpts - } - - if _, ok := typeSpecificOpts[pair[0]]; ok { - plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) - return "", nil, ErrInvalidAuthOpts - } - - typeSpecificOpts[pair[0]] = pair[1] - } - - return tokenType, typeSpecificOpts, nil - -} - -func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { - tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - switch tokenType { - case "simple": - plog.Warningf("simple token is not cryptographically signed") - return newTokenProviderSimple(indexWaiter), nil - case "jwt": - return newTokenProviderJWT(typeSpecificOpts) - default: - plog.Errorf("unknown token type: %s", tokenType) - return nil, ErrInvalidAuthOpts - } -} diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go deleted file mode 100644 index d15e00dd7160f..0000000000000 --- a/vendor/github.com/coreos/etcd/client/auth_role.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - - "golang.org/x/net/context" -) - -type Role struct { - Role string `json:"role"` - Permissions Permissions `json:"permissions"` - Grant *Permissions `json:"grant,omitempty"` - Revoke *Permissions `json:"revoke,omitempty"` -} - -type Permissions struct { - KV rwPermission `json:"kv"` -} - -type rwPermission struct { - Read []string `json:"read"` - Write []string `json:"write"` -} - -type PermissionType int - -const ( - ReadPermission PermissionType = iota - WritePermission - ReadWritePermission -) - -// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to -// interact with etcd's role creation and modification features. -func NewAuthRoleAPI(c Client) AuthRoleAPI { - return &httpAuthRoleAPI{ - client: c, - } -} - -type AuthRoleAPI interface { - // AddRole adds a role. 
- AddRole(ctx context.Context, role string) error - - // RemoveRole removes a role. - RemoveRole(ctx context.Context, role string) error - - // GetRole retrieves role details. - GetRole(ctx context.Context, role string) (*Role, error) - - // GrantRoleKV grants a role some permission prefixes for the KV store. - GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // RevokeRoleKV revokes some permission prefixes for a role on the KV store. - RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // ListRoles lists roles. - ListRoles(ctx context.Context) ([]string, error) -} - -type httpAuthRoleAPI struct { - client httpClient -} - -type authRoleAPIAction struct { - verb string - name string - role *Role -} - -type authRoleAPIList struct{} - -func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", l.name) - if l.role == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.role) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { - resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - var roleList struct { - Roles []Role `json:"roles"` - } - if err = json.Unmarshal(body, &roleList); err != nil { - return nil, err - } - ret := make([]string, 0, len(roleList.Roles)) - for _, r := range roleList.Roles { - ret = append(ret, r.Role) - } - return ret, nil -} - -func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { - role := &Role{ - Role: rolename, - } - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "DELETE", - name: rolename, - }) -} - -func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return err - } - if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err := json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { - return r.modRole(ctx, &authRoleAPIAction{ - verb: "GET", - name: rolename, - }) -} - -func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { - var out rwPermission - switch permType { - case ReadPermission: - out.Read = prefixes - case WritePermission: - out.Write = prefixes - case ReadWritePermission: - out.Read = prefixes - out.Write = prefixes - } - return out -} - -func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - 
Role: rolename, - Grant: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Revoke: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var role Role - if err = json.Unmarshal(body, &role); err != nil { - return nil, err - } - return &role, nil -} diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go deleted file mode 100644 index 97c3f31813b59..0000000000000 --- a/vendor/github.com/coreos/etcd/client/auth_user.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" -) - -var ( - defaultV2AuthPrefix = "/v2/auth" -) - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -// userListEntry is the user representation given by the server for ListUsers -type userListEntry struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type UserRoles struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -func v2AuthURL(ep url.URL, action string, name string) *url.URL { - if name != "" { - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) - return &ep - } - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) - return &ep -} - -// NewAuthAPI constructs a new AuthAPI that uses HTTP to -// interact with etcd's general auth features. -func NewAuthAPI(c Client) AuthAPI { - return &httpAuthAPI{ - client: c, - } -} - -type AuthAPI interface { - // Enable auth. - Enable(ctx context.Context) error - - // Disable auth. 
- Disable(ctx context.Context) error -} - -type httpAuthAPI struct { - client httpClient -} - -func (s *httpAuthAPI) Enable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"PUT"}) -} - -func (s *httpAuthAPI) Disable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"DELETE"}) -} - -func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { - resp, body, err := s.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -type authAPIAction struct { - verb string -} - -func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "enable", "") - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req -} - -type authError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e authError) Error() string { - return e.Message -} - -// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to -// interact with etcd's user creation and modification features. -func NewAuthUserAPI(c Client) AuthUserAPI { - return &httpAuthUserAPI{ - client: c, - } -} - -type AuthUserAPI interface { - // AddUser adds a user. - AddUser(ctx context.Context, username string, password string) error - - // RemoveUser removes a user. - RemoveUser(ctx context.Context, username string) error - - // GetUser retrieves user details. - GetUser(ctx context.Context, username string) (*User, error) - - // GrantUser grants a user some permission roles. - GrantUser(ctx context.Context, username string, roles []string) (*User, error) - - // RevokeUser revokes some permission roles from a user. - RevokeUser(ctx context.Context, username string, roles []string) (*User, error) - - // ChangePassword changes the user's password. - ChangePassword(ctx context.Context, username string, password string) (*User, error) - - // ListUsers lists the users. 
- ListUsers(ctx context.Context) ([]string, error) -} - -type httpAuthUserAPI struct { - client httpClient -} - -type authUserAPIAction struct { - verb string - username string - user *User -} - -type authUserAPIList struct{} - -func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", l.username) - if l.user == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.user) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { - resp, body, err := u.client.Do(ctx, &authUserAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - - var userList struct { - Users []userListEntry `json:"users"` - } - - if err = json.Unmarshal(body, &userList); err != nil { - return nil, err - } - - ret := make([]string, 0, len(userList.Users)) - for _, u := range userList.Users { - ret = append(ret, u.User) - } - return ret, nil -} - -func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { - user := &User{ - User: username, - Password: password, - } - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "DELETE", - username: username, - }) -} - -func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { - return u.modUser(ctx, &authUserAPIAction{ - verb: "GET", - username: username, - }) -} - -func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Grant: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Revoke: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { - user := &User{ - User: username, - Password: password, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return nil, 
err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var user User - if err = json.Unmarshal(body, &user); err != nil { - var userR UserRoles - if urerr := json.Unmarshal(body, &userR); urerr != nil { - return nil, err - } - user.User = userR.User - for _, r := range userR.Roles { - user.Roles = append(user.Roles, r.Role) - } - } - return &user, nil -} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go deleted file mode 100644 index 76d1f040198bc..0000000000000 --- a/vendor/github.com/coreos/etcd/client/cancelreq.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq.go - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go deleted file mode 100644 index 19ce2ec01dad8..0000000000000 --- a/vendor/github.com/coreos/etcd/client/client.go +++ /dev/null @@ -1,704 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "sync" - "time" - - "github.com/coreos/etcd/version" - - "golang.org/x/net/context" -) - -var ( - ErrNoEndpoints = errors.New("client: no endpoints available") - ErrTooManyRedirects = errors.New("client: too many redirects") - ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") - ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") - errTooManyRedirectChecks = errors.New("client: too many redirect checks") - - // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so - // that Do() will not retry a request - oneShotCtxValue interface{} -) - -var DefaultRequestTimeout = 5 * time.Second - -var DefaultTransport CancelableTransport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, -} - -type EndpointSelectionMode int - -const ( - // EndpointSelectionRandom is the default value of the 'SelectionMode'. - // As the name implies, the client object will pick a node from the members - // of the cluster in a random fashion. If the cluster has three members, A, B, - // and C, the client picks any node from its three members as its request - // destination. 
- EndpointSelectionRandom EndpointSelectionMode = iota - - // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', - // requests are sent directly to the cluster leader. This reduces - // forwarding roundtrips compared to making requests to etcd followers - // who then forward them to the cluster leader. In the event of a leader - // failure, however, clients configured this way cannot prioritize among - // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' - // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to - // maintain its knowledge of current cluster state. - // - // This mode should be used with Client.AutoSync(). - EndpointSelectionPrioritizeLeader -) - -type Config struct { - // Endpoints defines a set of URLs (schemes, hosts and ports only) - // that can be used to communicate with a logical etcd cluster. For - // example, a three-node cluster could be provided like so: - // - // Endpoints: []string{ - // "http://node1.example.com:2379", - // "http://node2.example.com:2379", - // "http://node3.example.com:2379", - // } - // - // If multiple endpoints are provided, the Client will attempt to - // use them all in the event that one or more of them are unusable. - // - // If Client.Sync is ever called, the Client may cache an alternate - // set of endpoints to continue operation. - Endpoints []string - - // Transport is used by the Client to drive HTTP requests. If not - // provided, DefaultTransport will be used. - Transport CancelableTransport - - // CheckRedirect specifies the policy for handling HTTP redirects. - // If CheckRedirect is not nil, the Client calls it before - // following an HTTP redirect. The sole argument is the number of - // requests that have already been made. If CheckRedirect returns - // an error, Client.Do will not make any further requests and return - // the error back it to the caller. - // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect CheckRedirectFunc - - // Username specifies the user credential to add as an authorization header - Username string - - // Password is the password for the specified user to add as an authorization header - // to the request. - Password string - - // HeaderTimeoutPerRequest specifies the time limit to wait for response - // header in a single request made by the Client. The timeout includes - // connection time, any redirects, and header wait time. - // - // For non-watch GET request, server returns the response body immediately. - // For PUT/POST/DELETE request, server will attempt to commit request - // before responding, which is expected to take `100ms + 2 * RTT`. - // For watch request, server returns the header immediately to notify Client - // watch start. But if server is behind some kind of proxy, the response - // header may be cached at proxy, and Client cannot rely on this behavior. - // - // Especially, wait request will ignore this timeout. - // - // One API call may send multiple requests to different etcd servers until it - // succeeds. Use context of the API to specify the overall timeout. - // - // A HeaderTimeoutPerRequest of zero means no timeout. - HeaderTimeoutPerRequest time.Duration - - // SelectionMode is an EndpointSelectionMode enum that specifies the - // policy for choosing the etcd cluster node to which requests are sent. 
- SelectionMode EndpointSelectionMode -} - -func (cfg *Config) transport() CancelableTransport { - if cfg.Transport == nil { - return DefaultTransport - } - return cfg.Transport -} - -func (cfg *Config) checkRedirect() CheckRedirectFunc { - if cfg.CheckRedirect == nil { - return DefaultCheckRedirect - } - return cfg.CheckRedirect -} - -// CancelableTransport mimics net/http.Transport, but requires that -// the object also support request cancellation. -type CancelableTransport interface { - http.RoundTripper - CancelRequest(req *http.Request) -} - -type CheckRedirectFunc func(via int) error - -// DefaultCheckRedirect follows up to 10 redirects, but no more. -var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { - if via > 10 { - return ErrTooManyRedirects - } - return nil -} - -type Client interface { - // Sync updates the internal cache of the etcd cluster's membership. - Sync(context.Context) error - - // AutoSync periodically calls Sync() every given interval. - // The recommended sync interval is 10 seconds to 1 minute, which does - // not bring too much overhead to server and makes client catch up the - // cluster change in time. - // - // The example to use it: - // - // for { - // err := client.AutoSync(ctx, 10*time.Second) - // if err == context.DeadlineExceeded || err == context.Canceled { - // break - // } - // log.Print(err) - // } - AutoSync(context.Context, time.Duration) error - - // Endpoints returns a copy of the current set of API endpoints used - // by Client to resolve HTTP requests. If Sync has ever been called, - // this may differ from the initial Endpoints provided in the Config. - Endpoints() []string - - // SetEndpoints sets the set of API endpoints used by Client to resolve - // HTTP requests. If the given endpoints are not valid, an error will be - // returned - SetEndpoints(eps []string) error - - // GetVersion retrieves the current etcd server and cluster version - GetVersion(ctx context.Context) (*version.Versions, error) - - httpClient -} - -func New(cfg Config) (Client, error) { - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), - rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - selectionMode: cfg.SelectionMode, - } - if cfg.Username != "" { - c.credentials = &credentials{ - username: cfg.Username, - password: cfg.Password, - } - } - if err := c.SetEndpoints(cfg.Endpoints); err != nil { - return nil, err - } - return c, nil -} - -type httpClient interface { - Do(context.Context, httpAction) (*http.Response, []byte, error) -} - -func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { - return func(ep url.URL) httpClient { - return &redirectFollowingHTTPClient{ - checkRedirect: cr, - client: &simpleHTTPClient{ - transport: tr, - endpoint: ep, - headerTimeout: headerTimeout, - }, - } - } -} - -type credentials struct { - username string - password string -} - -type httpClientFactory func(url.URL) httpClient - -type httpAction interface { - HTTPRequest(url.URL) *http.Request -} - -type httpClusterClient struct { - clientFactory httpClientFactory - endpoints []url.URL - pinned int - credentials *credentials - sync.RWMutex - rand *rand.Rand - selectionMode EndpointSelectionMode -} - -func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { - ceps := make([]url.URL, len(eps)) - copy(ceps, eps) - - // To perform a lookup on the new endpoint list 
without using the current - // client, we'll copy it - clientCopy := &httpClusterClient{ - clientFactory: c.clientFactory, - credentials: c.credentials, - rand: c.rand, - - pinned: 0, - endpoints: ceps, - } - - mAPI := NewMembersAPI(clientCopy) - leader, err := mAPI.Leader(ctx) - if err != nil { - return "", err - } - if len(leader.ClientURLs) == 0 { - return "", ErrNoLeaderEndpoint - } - - return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? -} - -func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { - if len(eps) == 0 { - return []url.URL{}, ErrNoEndpoints - } - - neps := make([]url.URL, len(eps)) - for i, ep := range eps { - u, err := url.Parse(ep) - if err != nil { - return []url.URL{}, err - } - neps[i] = *u - } - return neps, nil -} - -func (c *httpClusterClient) SetEndpoints(eps []string) error { - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - c.Lock() - defer c.Unlock() - - c.endpoints = shuffleEndpoints(c.rand, neps) - // We're not doing anything for PrioritizeLeader here. This is - // due to not having a context meaning we can't call getLeaderEndpoint - // However, if you're using PrioritizeLeader, you've already been told - // to regularly call sync, where we do have a ctx, and can figure the - // leader. PrioritizeLeader is also quite a loose guarantee, so deal - // with it - c.pinned = 0 - - return nil -} - -func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - action := act - c.RLock() - leps := len(c.endpoints) - eps := make([]url.URL, leps) - n := copy(eps, c.endpoints) - pinned := c.pinned - - if c.credentials != nil { - action = &authedAction{ - act: act, - credentials: *c.credentials, - } - } - c.RUnlock() - - if leps == 0 { - return nil, nil, ErrNoEndpoints - } - - if leps != n { - return nil, nil, errors.New("unable to pick endpoint: copy failed") - } - - var resp *http.Response - var body []byte - var err error - cerr := &ClusterError{} - isOneShot := ctx.Value(&oneShotCtxValue) != nil - - for i := pinned; i < leps+pinned; i++ { - k := i % leps - hc := c.clientFactory(eps[k]) - resp, body, err = hc.Do(ctx, action) - if err != nil { - cerr.Errors = append(cerr.Errors, err) - if err == ctx.Err() { - return nil, nil, ctx.Err() - } - if err == context.Canceled || err == context.DeadlineExceeded { - return nil, nil, err - } - } else if resp.StatusCode/100 == 5 { - switch resp.StatusCode { - case http.StatusInternalServerError, http.StatusServiceUnavailable: - // TODO: make sure this is a no leader response - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) - default: - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) - } - err = cerr.Errors[0] - } - if err != nil { - if !isOneShot { - continue - } - c.Lock() - c.pinned = (k + 1) % leps - c.Unlock() - return nil, nil, err - } - if k != pinned { - c.Lock() - c.pinned = k - c.Unlock() - } - return resp, body, nil - } - - return nil, nil, cerr -} - -func (c *httpClusterClient) Endpoints() []string { - c.RLock() - defer c.RUnlock() - - eps := make([]string, len(c.endpoints)) - for i, ep := range c.endpoints { - eps[i] = ep.String() - } - - return eps -} - -func (c *httpClusterClient) Sync(ctx context.Context) error { - mAPI := NewMembersAPI(c) - ms, err := mAPI.List(ctx) - if err != nil { - return err - } - - var eps []string - for _, m := range ms { - 
eps = append(eps, m.ClientURLs...) - } - - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - npin := 0 - - switch c.selectionMode { - case EndpointSelectionRandom: - c.RLock() - eq := endpointsEqual(c.endpoints, neps) - c.RUnlock() - - if eq { - return nil - } - // When items in the endpoint list changes, we choose a new pin - neps = shuffleEndpoints(c.rand, neps) - case EndpointSelectionPrioritizeLeader: - nle, err := c.getLeaderEndpoint(ctx, neps) - if err != nil { - return ErrNoLeaderEndpoint - } - - for i, n := range neps { - if n.String() == nle { - npin = i - break - } - } - default: - return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) - } - - c.Lock() - defer c.Unlock() - c.endpoints = neps - c.pinned = npin - - return nil -} - -func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - err := c.Sync(ctx) - if err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } -} - -func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { - act := &getAction{Prefix: "/version"} - - resp, body, err := c.Do(ctx, act) - if err != nil { - return nil, err - } - - switch resp.StatusCode { - case http.StatusOK: - if len(body) == 0 { - return nil, ErrEmptyBody - } - var vresp version.Versions - if err := json.Unmarshal(body, &vresp); err != nil { - return nil, ErrInvalidJSON - } - return &vresp, nil - default: - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return nil, ErrInvalidJSON - } - return nil, etcdErr - } -} - -type roundTripResponse struct { - resp *http.Response - err error -} - -type simpleHTTPClient struct { - transport CancelableTransport - endpoint url.URL - headerTimeout time.Duration -} - -func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - req := act.HTTPRequest(c.endpoint) - - if err := printcURL(req); err != nil { - return nil, nil, err - } - - isWait := false - if req != nil && req.URL != nil { - ws := req.URL.Query().Get("wait") - if len(ws) != 0 { - var err error - isWait, err = strconv.ParseBool(ws) - if err != nil { - return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) - } - } - } - - var hctx context.Context - var hcancel context.CancelFunc - if !isWait && c.headerTimeout > 0 { - hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) - } else { - hctx, hcancel = context.WithCancel(ctx) - } - defer hcancel() - - reqcancel := requestCanceler(c.transport, req) - - rtchan := make(chan roundTripResponse, 1) - go func() { - resp, err := c.transport.RoundTrip(req) - rtchan <- roundTripResponse{resp: resp, err: err} - close(rtchan) - }() - - var resp *http.Response - var err error - - select { - case rtresp := <-rtchan: - resp, err = rtresp.resp, rtresp.err - case <-hctx.Done(): - // cancel and wait for request to actually exit before continuing - reqcancel() - rtresp := <-rtchan - resp = rtresp.resp - switch { - case ctx.Err() != nil: - err = ctx.Err() - case hctx.Err() != nil: - err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) - default: - panic("failed to get error from context") - } - } - - // always check for resp nil-ness to deal with possible - // race conditions between channels above - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - if err != nil { - return nil, nil, err - } - - var 
body []byte - done := make(chan struct{}) - go func() { - body, err = ioutil.ReadAll(resp.Body) - done <- struct{}{} - }() - - select { - case <-ctx.Done(): - resp.Body.Close() - <-done - return nil, nil, ctx.Err() - case <-done: - } - - return resp, body, err -} - -type authedAction struct { - act httpAction - credentials credentials -} - -func (a *authedAction) HTTPRequest(url url.URL) *http.Request { - r := a.act.HTTPRequest(url) - r.SetBasicAuth(a.credentials.username, a.credentials.password) - return r -} - -type redirectFollowingHTTPClient struct { - client httpClient - checkRedirect CheckRedirectFunc -} - -func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - next := act - for i := 0; i < 100; i++ { - if i > 0 { - if err := r.checkRedirect(i); err != nil { - return nil, nil, err - } - } - resp, body, err := r.client.Do(ctx, next) - if err != nil { - return nil, nil, err - } - if resp.StatusCode/100 == 3 { - hdr := resp.Header.Get("Location") - if hdr == "" { - return nil, nil, fmt.Errorf("Location header not set") - } - loc, err := url.Parse(hdr) - if err != nil { - return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) - } - next = &redirectedHTTPAction{ - action: act, - location: *loc, - } - continue - } - return resp, body, nil - } - - return nil, nil, errTooManyRedirectChecks -} - -type redirectedHTTPAction struct { - action httpAction - location url.URL -} - -func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { - orig := r.action.HTTPRequest(ep) - orig.URL = &r.location - return orig -} - -func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - p := r.Perm(len(eps)) - neps := make([]url.URL, len(eps)) - for i, k := range p { - neps[i] = eps[k] - } - return neps -} - -func endpointsEqual(left, right []url.URL) bool { - if len(left) != len(right) { - return false - } - - sLeft := make([]string, len(left)) - sRight := make([]string, len(right)) - for i, l := range left { - sLeft[i] = l.String() - } - for i, r := range right { - sRight[i] = r.String() - } - - sort.Strings(sLeft) - sort.Strings(sRight) - for i := range sLeft { - if sLeft[i] != sRight[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go deleted file mode 100644 index 34618cdbd9e60..0000000000000 --- a/vendor/github.com/coreos/etcd/client/cluster_error.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import "fmt" - -type ClusterError struct { - Errors []error -} - -func (ce *ClusterError) Error() string { - s := ErrClusterUnavailable.Error() - for i, e := range ce.Errors { - s += fmt.Sprintf("; error #%d: %s\n", i, e) - } - return s -} - -func (ce *ClusterError) Detail() string { - s := "" - for i, e := range ce.Errors { - s += fmt.Sprintf("error #%d: %s\n", i, e) - } - return s -} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go deleted file mode 100644 index c8bc9fba20ecc..0000000000000 --- a/vendor/github.com/coreos/etcd/client/curl.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "os" -) - -var ( - cURLDebug = false -) - -func EnablecURLDebug() { - cURLDebug = true -} - -func DisablecURLDebug() { - cURLDebug = false -} - -// printcURL prints the cURL equivalent request to stderr. -// It returns an error if the body of the request cannot -// be read. -// The caller MUST cancel the request if there is an error. -func printcURL(req *http.Request) error { - if !cURLDebug { - return nil - } - var ( - command string - b []byte - err error - ) - - if req.URL != nil { - command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) - } - - if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) - if err != nil { - return err - } - command += fmt.Sprintf(" -d %q", string(b)) - } - - fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) - - // reset body - body := bytes.NewBuffer(b) - req.Body = ioutil.NopCloser(body) - - return nil -} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go deleted file mode 100644 index 442e35fe543b9..0000000000000 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "github.com/coreos/etcd/pkg/srv" -) - -// Discoverer is an interface that wraps the Discover method. -type Discoverer interface { - // Discover looks up the etcd servers for the domain. - Discover(domain string) ([]string, error) -} - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
-func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -func (d *srvDiscover) Discover(domain string) ([]string, error) { - srvs, err := srv.GetClient("etcd-client", domain) - if err != nil { - return nil, err - } - return srvs.Endpoints, nil -} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go deleted file mode 100644 index 32fdfb52c6704..0000000000000 --- a/vendor/github.com/coreos/etcd/client/doc.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package client provides bindings for the etcd APIs. - -Create a Config and exchange it for a Client: - - import ( - "net/http" - - "github.com/coreos/etcd/client" - "golang.org/x/net/context" - ) - - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: DefaultTransport, - } - - c, err := client.New(cfg) - if err != nil { - // handle error - } - -Clients are safe for concurrent use by multiple goroutines. - -Create a KeysAPI using the Client, then use it to interact with etcd: - - kAPI := client.NewKeysAPI(c) - - // create a new key /foo with the value "bar" - _, err = kAPI.Create(context.Background(), "/foo", "bar") - if err != nil { - // handle error - } - - // delete the newly created key only if the value is still "bar" - _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) - if err != nil { - // handle error - } - -Use a custom context to set timeouts on your operations: - - import "time" - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // set a new key, ignoring it's previous state - _, err := kAPI.Set(ctx, "/ping", "pong", nil) - if err != nil { - if err == context.DeadlineExceeded { - // request took longer than 5s - } else { - // handle error - } - } - -*/ -package client diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go deleted file mode 100644 index 216139c9cc71b..0000000000000 --- a/vendor/github.com/coreos/etcd/client/keys.generated.go +++ /dev/null @@ -1,1087 +0,0 @@ -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package client - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81819 = 1 - codecSelferC_RAW1819 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1819 = 10 - codecSelferValueTypeMap1819 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1819 = 2 - codecSelfer_containerMapValue1819 = 3 - codecSelfer_containerMapEnd1819 = 4 - codecSelfer_containerArrayElem1819 = 6 - codecSelfer_containerArrayEnd1819 = 7 -) - -var ( - codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1819 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 time.Time - _ = v0 - } -} - -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("action")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("node")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("prevNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, 
z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1819 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys3 { - case "action": - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv4 := &x.Action - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "node": - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - case "prevNode": - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv9 := &x.Action - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - 
z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Dir != false - yyq2[6] = x.Expiration != nil - yyq2[7] = x.TTL != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("dir")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("nodes")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[6] { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { - r.EncodeBuiltin(yym23, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym22 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("expiration")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Expiration == nil { - r.EncodeNil() - } else { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { - r.EncodeBuiltin(yym25, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym24 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[7] { - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("ttl")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1819 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch 
yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv6 := &x.Dir - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "nodes": - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv10 := &x.Nodes - yyv10.CodecDecodeSelf(d) - } - case "createdIndex": - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv11 := &x.CreatedIndex - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) - } - } - case "modifiedIndex": - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv13 := &x.ModifiedIndex - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) - } - } - case "expiration": - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { - r.DecodeBuiltin(yym17, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym16 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - case "ttl": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv18 := &x.TTL - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int64)(yyv18)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj20 int - var yyb20 bool - var yyhl20 bool = l >= 0 - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv21 := &x.Key - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv23 := &x.Dir - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv25 := &x.Value - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = 
yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv27 := &x.Nodes - yyv27.CodecDecodeSelf(d) - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv28 := &x.CreatedIndex - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv30 := &x.ModifiedIndex - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym33 := z.DecBinary() - _ = yym33 - if false { - } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { - r.DecodeBuiltin(yym34, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym33 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym33 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv35 := &x.TTL - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int64)(yyv35)) = int64(r.DecodeInt(64)) - } - } - for { - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj20-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encNodes((Nodes)(x), e) - } - } -} - -func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decNodes((*Nodes)(x), d) - } -} - -func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyv1 == 
nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]*Node, yyrl1) - } - } else { - yyv1 = make([]*Node, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw2 := yyv1[yyj1] - yyw2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, nil) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw3 := yyv1[yyj1] - yyw3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) // var yyz1 *Node - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw4 := yyv1[yyj1] - yyw4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go deleted file mode 100644 index 4a6c41a78bc88..0000000000000 --- a/vendor/github.com/coreos/etcd/client/keys.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/coreos/etcd/pkg/pathutil" - "github.com/ugorji/go/codec" - "golang.org/x/net/context" -) - -const ( - ErrorCodeKeyNotFound = 100 - ErrorCodeTestFailed = 101 - ErrorCodeNotFile = 102 - ErrorCodeNotDir = 104 - ErrorCodeNodeExist = 105 - ErrorCodeRootROnly = 107 - ErrorCodeDirNotEmpty = 108 - ErrorCodeUnauthorized = 110 - - ErrorCodePrevValueRequired = 201 - ErrorCodeTTLNaN = 202 - ErrorCodeIndexNaN = 203 - ErrorCodeInvalidField = 209 - ErrorCodeInvalidForm = 210 - - ErrorCodeRaftInternal = 300 - ErrorCodeLeaderElect = 301 - - ErrorCodeWatcherCleared = 400 - ErrorCodeEventIndexCleared = 401 -) - -type Error struct { - Code int `json:"errorCode"` - Message string `json:"message"` - Cause string `json:"cause"` - Index uint64 `json:"index"` -} - -func (e Error) Error() string { - return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) -} - -var ( - ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") - ErrEmptyBody = errors.New("client: response body is empty") -) - -// PrevExistType is used to define an existence condition when setting -// or deleting Nodes. -type PrevExistType string - -const ( - PrevIgnore = PrevExistType("") - PrevExist = PrevExistType("true") - PrevNoExist = PrevExistType("false") -) - -var ( - defaultV2KeysPrefix = "/v2/keys" -) - -// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value -// API over HTTP. -func NewKeysAPI(c Client) KeysAPI { - return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) -} - -// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller -// to provide a custom base URL path. This should only be used in -// very rare cases. -func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { - return &httpKeysAPI{ - client: c, - prefix: p, - } -} - -type KeysAPI interface { - // Get retrieves a set of Nodes from etcd - Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) - - // Set assigns a new value to a Node identified by a given key. The caller - // may define a set of conditions in the SetOptions. If SetOptions.Dir=true - // then value is ignored. - Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) - - // Delete removes a Node identified by the given key, optionally destroying - // all of its children as well. The caller may define a set of required - // conditions in an DeleteOptions object. - Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) - - // Create is an alias for Set w/ PrevExist=false - Create(ctx context.Context, key, value string) (*Response, error) - - // CreateInOrder is used to atomically create in-order keys within the given directory. - CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) - - // Update is an alias for Set w/ PrevExist=true - Update(ctx context.Context, key, value string) (*Response, error) - - // Watcher builds a new Watcher targeted at a specific Node identified - // by the given key. The Watcher may be configured at creation time - // through a WatcherOptions object. The returned Watcher is designed - // to emit events that happen to a Node, and optionally to its children. 
- Watcher(key string, opts *WatcherOptions) Watcher -} - -type WatcherOptions struct { - // AfterIndex defines the index after-which the Watcher should - // start emitting events. For example, if a value of 5 is - // provided, the first event will have an index >= 6. - // - // Setting AfterIndex to 0 (default) means that the Watcher - // should start watching for events starting at the current - // index, whatever that may be. - AfterIndex uint64 - - // Recursive specifies whether or not the Watcher should emit - // events that occur in children of the given keyspace. If set - // to false (default), events will be limited to those that - // occur for the exact key. - Recursive bool -} - -type CreateInOrderOptions struct { - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration -} - -type SetOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Set operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - // - // PrevValue is ignored if Dir=true - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Set operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // PrevExist specifies whether the Node must currently exist - // (PrevExist) or not (PrevNoExist). If the caller does not - // care about existence, set PrevExist to PrevIgnore, or simply - // leave it unset. - PrevExist PrevExistType - - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration - - // Refresh set to true means a TTL value can be updated - // without firing a watch or changing the node value. A - // value must not be provided when refreshing a key. - Refresh bool - - // Dir specifies whether or not this Node should be created as a directory. - Dir bool - - // NoValueOnSuccess specifies whether the response contains the current value of the Node. - // If set, the response will only contain the current value when the request fails. - NoValueOnSuccess bool -} - -type GetOptions struct { - // Recursive defines whether or not all children of the Node - // should be returned. - Recursive bool - - // Sort instructs the server whether or not to sort the Nodes. - // If true, the Nodes are sorted alphabetically by key in - // ascending order (A to z). If false (default), the Nodes will - // not be sorted and the ordering used should not be considered - // predictable. - Sort bool - - // Quorum specifies whether it gets the latest committed value that - // has been applied in quorum of members, which ensures external - // consistency (or linearizability). - Quorum bool -} - -type DeleteOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Delete operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. 
- PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Delete operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // Recursive defines whether or not all children of the Node - // should be deleted. If set to true, all children of the Node - // identified by the given key will be deleted. If left unset - // or explicitly set to false, only a single Node will be - // deleted. - Recursive bool - - // Dir specifies whether or not this Node should be removed as a directory. - Dir bool -} - -type Watcher interface { - // Next blocks until an etcd event occurs, then returns a Response - // representing that event. The behavior of Next depends on the - // WatcherOptions used to construct the Watcher. Next is designed to - // be called repeatedly, each time blocking until a subsequent event - // is available. - // - // If the provided context is cancelled, Next will return a non-nil - // error. Any other failures encountered while waiting for the next - // event (connection issues, deserialization failures, etc) will - // also result in a non-nil error. - Next(context.Context) (*Response, error) -} - -type Response struct { - // Action is the name of the operation that occurred. Possible values - // include get, set, delete, update, create, compareAndSwap, - // compareAndDelete and expire. - Action string `json:"action"` - - // Node represents the state of the relevant etcd Node. - Node *Node `json:"node"` - - // PrevNode represents the previous state of the Node. PrevNode is non-nil - // only if the Node existed before the action occurred and the action - // caused a change to the Node. - PrevNode *Node `json:"prevNode"` - - // Index holds the cluster-level index at the time the Response was generated. - // This index is not tied to the Node(s) contained in this Response. - Index uint64 `json:"-"` - - // ClusterID holds the cluster-level ID reported by the server. This - // should be different for different etcd clusters. - ClusterID string `json:"-"` -} - -type Node struct { - // Key represents the unique location of this Node (e.g. "/foo/bar"). - Key string `json:"key"` - - // Dir reports whether node describes a directory. - Dir bool `json:"dir,omitempty"` - - // Value is the current data stored on this Node. If this Node - // is a directory, Value will be empty. - Value string `json:"value"` - - // Nodes holds the children of this Node, only if this Node is a directory. - // This slice of will be arbitrarily deep (children, grandchildren, great- - // grandchildren, etc.) if a recursive Get or Watch request were made. - Nodes Nodes `json:"nodes"` - - // CreatedIndex is the etcd index at-which this Node was created. - CreatedIndex uint64 `json:"createdIndex"` - - // ModifiedIndex is the etcd index at-which this Node was last modified. - ModifiedIndex uint64 `json:"modifiedIndex"` - - // Expiration is the server side expiration time of the key. - Expiration *time.Time `json:"expiration,omitempty"` - - // TTL is the time to live of the key in second. 
- TTL int64 `json:"ttl,omitempty"` -} - -func (n *Node) String() string { - return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) -} - -// TTLDuration returns the Node's TTL as a time.Duration object -func (n *Node) TTLDuration() time.Duration { - return time.Duration(n.TTL) * time.Second -} - -type Nodes []*Node - -// interfaces for sorting - -func (ns Nodes) Len() int { return len(ns) } -func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } -func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } - -type httpKeysAPI struct { - client httpClient - prefix string -} - -func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { - act := &setAction{ - Prefix: k.prefix, - Key: key, - Value: val, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.PrevExist = opts.PrevExist - act.TTL = opts.TTL - act.Refresh = opts.Refresh - act.Dir = opts.Dir - act.NoValueOnSuccess = opts.NoValueOnSuccess - } - - doCtx := ctx - if act.PrevExist == PrevNoExist { - doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) - } - resp, body, err := k.client.Do(doCtx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) -} - -func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { - act := &createInOrderAction{ - Prefix: k.prefix, - Dir: dir, - Value: val, - } - - if opts != nil { - act.TTL = opts.TTL - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) -} - -func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { - act := &deleteAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.Dir = opts.Dir - act.Recursive = opts.Recursive - } - - doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) - resp, body, err := k.client.Do(doCtx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { - act := &getAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - act.Sorted = opts.Sort - act.Quorum = opts.Quorum - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { - act := waitAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - if opts.AfterIndex > 0 { - act.WaitIndex = opts.AfterIndex + 1 - } - } - - return &httpWatcher{ - client: k.client, - nextWait: act, - } -} - -type httpWatcher struct { - client httpClient - nextWait waitAction -} - -func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { - for { - 
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) - if err != nil { - return nil, err - } - - resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) - if err != nil { - if err == ErrEmptyBody { - continue - } - return nil, err - } - - hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 - return resp, nil - } -} - -// v2KeysURL forms a URL representing the location of a key. -// The endpoint argument represents the base URL of an etcd -// server. The prefix is the path needed to route from the -// provided endpoint's path to the root of the keys API -// (typically "/v2/keys"). -func v2KeysURL(ep url.URL, prefix, key string) *url.URL { - // We concatenate all parts together manually. We cannot use - // path.Join because it does not reserve trailing slash. - // We call CanonicalURLPath to further cleanup the path. - if prefix != "" && prefix[0] != '/' { - prefix = "/" + prefix - } - if key != "" && key[0] != '/' { - key = "/" + key - } - ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) - return &ep -} - -type getAction struct { - Prefix string - Key string - Recursive bool - Sorted bool - Quorum bool -} - -func (g *getAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, g.Prefix, g.Key) - - params := u.Query() - params.Set("recursive", strconv.FormatBool(g.Recursive)) - params.Set("sorted", strconv.FormatBool(g.Sorted)) - params.Set("quorum", strconv.FormatBool(g.Quorum)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type waitAction struct { - Prefix string - Key string - WaitIndex uint64 - Recursive bool -} - -func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, w.Prefix, w.Key) - - params := u.Query() - params.Set("wait", "true") - params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) - params.Set("recursive", strconv.FormatBool(w.Recursive)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type setAction struct { - Prefix string - Key string - Value string - PrevValue string - PrevIndex uint64 - PrevExist PrevExistType - TTL time.Duration - Refresh bool - Dir bool - NoValueOnSuccess bool -} - -func (a *setAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - form := url.Values{} - - // we're either creating a directory or setting a key - if a.Dir { - params.Set("dir", strconv.FormatBool(a.Dir)) - } else { - // These options are only valid for setting a key - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - form.Add("value", a.Value) - } - - // Options which apply to both setting a key and creating a dir - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.PrevExist != PrevIgnore { - params.Set("prevExist", string(a.PrevExist)) - } - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - - if a.Refresh { - form.Add("refresh", "true") - } - if a.NoValueOnSuccess { - params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) - } - - u.RawQuery = params.Encode() - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("PUT", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type deleteAction struct { - Prefix string - Key string - PrevValue string - PrevIndex uint64 - Dir bool - Recursive bool -} - -func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.Dir { - params.Set("dir", "true") - } - if a.Recursive { - params.Set("recursive", "true") - } - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("DELETE", u.String(), nil) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type createInOrderAction struct { - Prefix string - Dir string - Value string - TTL time.Duration -} - -func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Dir) - - form := url.Values{} - form.Add("value", a.Value) - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("POST", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req -} - -func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { - switch code { - case http.StatusOK, http.StatusCreated: - if len(body) == 0 { - return nil, ErrEmptyBody - } - res, err = unmarshalSuccessfulKeysResponse(header, body) - default: - err = unmarshalFailedKeysResponse(body) - } - - return -} - -func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { - var res Response - err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) - if err != nil { - return nil, ErrInvalidJSON - } - if header.Get("X-Etcd-Index") != "" { - res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) - if err != nil { - return nil, err - } - } - res.ClusterID = header.Get("X-Etcd-Cluster-ID") - return &res, nil -} - -func unmarshalFailedKeysResponse(body []byte) error { - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return ErrInvalidJSON - } - return etcdErr -} diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go deleted file mode 100644 index 23adf07ad91b7..0000000000000 --- a/vendor/github.com/coreos/etcd/client/members.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - defaultV2MembersPrefix = "/v2/members" - defaultLeaderSuffix = "/leader" -) - -type Member struct { - // ID is the unique identifier of this Member. - ID string `json:"id"` - - // Name is a human-readable, non-unique identifier of this Member. - Name string `json:"name"` - - // PeerURLs represents the HTTP(S) endpoints this Member uses to - // participate in etcd's consensus protocol. 
- PeerURLs []string `json:"peerURLs"` - - // ClientURLs represents the HTTP(S) endpoints on which this Member - // serves it's client-facing APIs. - ClientURLs []string `json:"clientURLs"` -} - -type memberCollection []Member - -func (c *memberCollection) UnmarshalJSON(data []byte) error { - d := struct { - Members []Member - }{} - - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - if d.Members == nil { - *c = make([]Member, 0) - return nil - } - - *c = d.Members - return nil -} - -type memberCreateOrUpdateRequest struct { - PeerURLs types.URLs -} - -func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{ - PeerURLs: make([]string, len(m.PeerURLs)), - } - - for i, u := range m.PeerURLs { - s.PeerURLs[i] = u.String() - } - - return json.Marshal(&s) -} - -// NewMembersAPI constructs a new MembersAPI that uses HTTP to -// interact with etcd's membership API. -func NewMembersAPI(c Client) MembersAPI { - return &httpMembersAPI{ - client: c, - } -} - -type MembersAPI interface { - // List enumerates the current cluster membership. - List(ctx context.Context) ([]Member, error) - - // Add instructs etcd to accept a new Member into the cluster. - Add(ctx context.Context, peerURL string) (*Member, error) - - // Remove demotes an existing Member out of the cluster. - Remove(ctx context.Context, mID string) error - - // Update instructs etcd to update an existing Member in the cluster. - Update(ctx context.Context, mID string, peerURLs []string) error - - // Leader gets current leader of the cluster - Leader(ctx context.Context) (*Member, error) -} - -type httpMembersAPI struct { - client httpClient -} - -func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { - req := &membersAPIActionList{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var mCollection memberCollection - if err := json.Unmarshal(body, &mCollection); err != nil { - return nil, err - } - - return []Member(mCollection), nil -} - -func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { - urls, err := types.NewURLs([]string{peerURL}) - if err != nil { - return nil, err - } - - req := &membersAPIActionAdd{peerURLs: urls} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusCreated { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return nil, err - } - return nil, merr - } - - var memb Member - if err := json.Unmarshal(body, &memb); err != nil { - return nil, err - } - - return &memb, nil -} - -func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { - urls, err := types.NewURLs(peerURLs) - if err != nil { - return err - } - - req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return err - } - return merr - } - - return nil -} - -func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { - req := &membersAPIActionRemove{memberID: memberID} - resp, _, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) -} - -func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { - req := &membersAPIActionLeader{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var leader Member - if err := json.Unmarshal(body, &leader); err != nil { - return nil, err - } - - return &leader, nil -} - -type membersAPIActionList struct{} - -func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type membersAPIActionRemove struct { - memberID string -} - -func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, d.memberID) - req, _ := http.NewRequest("DELETE", u.String(), nil) - return req -} - -type membersAPIActionAdd struct { - peerURLs types.URLs -} - -func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -type membersAPIActionUpdate struct { - memberID string - peerURLs types.URLs -} - -func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - u.Path = path.Join(u.Path, a.memberID) - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -func assertStatusCode(got int, want ...int) (err error) { - for _, w := range want { - if w == got { - return nil - } - } - return fmt.Errorf("unexpected status code %d", got) -} - -type membersAPIActionLeader struct{} - -func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, defaultLeaderSuffix) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -// v2MembersURL add the necessary path to the provided endpoint -// to route requests to the default v2 members API. -func v2MembersURL(ep url.URL) *url.URL { - ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) - return &ep -} - -type membersError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e membersError) Error() string { - return e.Message -} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go deleted file mode 100644 index 15a8babff4d4f..0000000000000 --- a/vendor/github.com/coreos/etcd/client/util.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "regexp" -) - -var ( - roleNotFoundRegExp *regexp.Regexp - userNotFoundRegExp *regexp.Regexp -) - -func init() { - roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") - userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") -} - -// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. -func IsKeyNotFound(err error) bool { - if cErr, ok := err.(Error); ok { - return cErr.Code == ErrorCodeKeyNotFound - } - return false -} - -// IsRoleNotFound returns true if the error means role not found of v2 API. -func IsRoleNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return roleNotFoundRegExp.MatchString(ae.Message) - } - return false -} - -// IsUserNotFound returns true if the error means user not found of v2 API. -func IsUserNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return userNotFoundRegExp.MatchString(ae.Message) - } - return false -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go deleted file mode 100644 index 5e2de58e9a109..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "sync" - - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" - "github.com/coreos/pkg/capnslog" -) - -type Capability string - -const ( - AuthCapability Capability = "auth" - V3rpcCapability Capability = "v3rpc" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") - - // capabilityMaps is a static map of version to capability map. - capabilityMaps = map[string]map[Capability]bool{ - "3.0.0": {AuthCapability: true, V3rpcCapability: true}, - "3.1.0": {AuthCapability: true, V3rpcCapability: true}, - "3.2.0": {AuthCapability: true, V3rpcCapability: true}, - } - - enableMapMu sync.RWMutex - // enabledMap points to a map in capabilityMaps - enabledMap map[Capability]bool - - curVersion *semver.Version -) - -func init() { - enabledMap = map[Capability]bool{ - AuthCapability: true, - V3rpcCapability: true, - } -} - -// UpdateCapability updates the enabledMap when the cluster version increases. 
-func UpdateCapability(v *semver.Version) { - if v == nil { - // if recovered but version was never set by cluster - return - } - enableMapMu.Lock() - if curVersion != nil && !curVersion.LessThan(*v) { - enableMapMu.Unlock() - return - } - curVersion = v - enabledMap = capabilityMaps[curVersion.String()] - enableMapMu.Unlock() - plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) -} - -func IsCapabilityEnabled(c Capability) bool { - enableMapMu.RLock() - defer enableMapMu.RUnlock() - if enabledMap == nil { - return false - } - return enabledMap[c] -} - -func EnableCapability(c Capability) { - enableMapMu.Lock() - defer enableMapMu.Unlock() - enabledMap[c] = true -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go deleted file mode 100644 index 87face4a139c5..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - - "github.com/coreos/go-semver/semver" -) - -// Cluster is an interface representing a collection of members in one etcd cluster. -type Cluster interface { - // ID returns the cluster ID - ID() types.ID - // ClientURLs returns an aggregate set of all URLs on which this - // cluster is listening for client requests - ClientURLs() []string - // Members returns a slice of members sorted by their ID - Members() []*membership.Member - // Member retrieves a particular member based on ID, or nil if the - // member does not exist in the cluster - Member(id types.ID) *membership.Member - // IsIDRemoved checks whether the given ID has been removed from this - // cluster at some point in the past - IsIDRemoved(id types.ID) bool - // Version is the cluster-wide minimum major.minor version. - Version() *semver.Version -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go deleted file mode 100644 index f44881be663e0..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package api manages the capabilities and features that are exposed to clients by the etcd cluster. 
-package api diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go deleted file mode 100644 index e66c5261d0a5f..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" -) - -type AuthServer struct { - authenticator etcdserver.Authenticator -} - -func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer { - return &AuthServer{authenticator: s} -} - -func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - resp, err := as.authenticator.AuthEnable(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - resp, err := as.authenticator.AuthDisable(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - resp, err := as.authenticator.Authenticate(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := as.authenticator.RoleAdd(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := as.authenticator.RoleDelete(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := as.authenticator.RoleGet(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := as.authenticator.RoleList(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := as.authenticator.RoleRevokePermission(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := as.authenticator.RoleGrantPermission(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserAdd(ctx 
context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := as.authenticator.UserAdd(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := as.authenticator.UserDelete(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := as.authenticator.UserGet(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := as.authenticator.UserList(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := as.authenticator.UserGrantRole(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := as.authenticator.UserRevokeRole(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := as.authenticator.UserChangePassword(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go deleted file mode 100644 index 17a2c87ae61d7..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import "github.com/gogo/protobuf/proto" - -type codec struct{} - -func (c *codec) Marshal(v interface{}) ([]byte, error) { - b, err := proto.Marshal(v.(proto.Message)) - sentBytes.Add(float64(len(b))) - return b, err -} - -func (c *codec) Unmarshal(data []byte, v interface{}) error { - receivedBytes.Add(float64(len(data))) - return proto.Unmarshal(data, v.(proto.Message)) -} - -func (c *codec) String() string { - return "proto" -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go deleted file mode 100644 index 5333491a2e25e..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "crypto/tls" - "io/ioutil" - "math" - "os" - "sync" - - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -const ( - grpcOverheadBytes = 512 * 1024 - maxStreams = math.MaxUint32 - maxSendBytes = math.MaxInt32 -) - -// integration tests call this multiple times, which is racey in gRPC side -var grpclogOnce sync.Once - -func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server { - var opts []grpc.ServerOption - opts = append(opts, grpc.CustomCodec(&codec{})) - if tls != nil { - opts = append(opts, grpc.Creds(credentials.NewTLS(tls))) - } - opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) - opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) - opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) - opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) - opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) - grpcServer := grpc.NewServer(append(opts, gopts...)...) 
- - pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) - pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) - pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) - pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) - pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) - pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) - - grpclogOnce.Do(func() { - if s.Cfg.Debug { - grpc.EnableTracing = true - // enable info, warning, error - grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) - } else { - // only discard info - grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr)) - } - }) - return grpcServer -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go deleted file mode 100644 index d6d7f35d50f11..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type header struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - rev func() int64 -} - -func newHeader(s *etcdserver.EtcdServer) header { - return header{ - clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), - raftTimer: s, - rev: func() int64 { return s.KV().Rev() }, - } -} - -// fill populates pb.ResponseHeader using etcdserver information -func (h *header) fill(rh *pb.ResponseHeader) { - rh.ClusterId = uint64(h.clusterID) - rh.MemberId = uint64(h.memberID) - rh.RaftTerm = h.raftTimer.Term() - if rh.Revision == 0 { - rh.Revision = h.rev() - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go deleted file mode 100644 index de9470a8905c2..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
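// For orientation, a runnable reduction of the response-header pattern from the
// deleted header.go above: one helper stamps cluster-wide fields on every
// response and only defaults Revision when the handler has not set it already.
// ResponseHeader and header here are simplified stand-ins for the pb/etcdserver types.
package main

import "fmt"

type ResponseHeader struct {
	ClusterID uint64
	MemberID  uint64
	Revision  int64
	RaftTerm  uint64
}

type header struct {
	clusterID uint64
	memberID  uint64
	term      func() uint64
	rev       func() int64
}

// fill stamps server-wide fields; Revision is only filled when unset so
// handlers that already know the revision (e.g. Range) keep their value.
func (h *header) fill(rh *ResponseHeader) {
	rh.ClusterID = h.clusterID
	rh.MemberID = h.memberID
	rh.RaftTerm = h.term()
	if rh.Revision == 0 {
		rh.Revision = h.rev()
	}
}

func main() {
	h := header{
		clusterID: 1,
		memberID:  2,
		term:      func() uint64 { return 7 },
		rev:       func() int64 { return 42 },
	}
	rh := &ResponseHeader{}
	h.fill(rh)
	fmt.Printf("%+v\n", *rh)
}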
- -package v3rpc - -import ( - "sync" - "time" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - - prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -const ( - maxNoLeaderCnt = 3 -) - -type streamsMap struct { - mu sync.Mutex - streams map[grpc.ServerStream]struct{} -} - -func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - if !api.IsCapabilityEnabled(api.V3rpcCapability) { - return nil, rpctypes.ErrGRPCNotCapable - } - - md, ok := metadata.FromIncomingContext(ctx) - if ok { - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { - if s.Leader() == types.ID(raft.None) { - return nil, rpctypes.ErrGRPCNoLeader - } - } - } - - return prometheus.UnaryServerInterceptor(ctx, req, info, handler) - } -} - -func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor { - smap := monitorLeader(s) - - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if !api.IsCapabilityEnabled(api.V3rpcCapability) { - return rpctypes.ErrGRPCNotCapable - } - - md, ok := metadata.FromIncomingContext(ss.Context()) - if ok { - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { - if s.Leader() == types.ID(raft.None) { - return rpctypes.ErrGRPCNoLeader - } - - cctx, cancel := context.WithCancel(ss.Context()) - ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss} - - smap.mu.Lock() - smap.streams[ss] = struct{}{} - smap.mu.Unlock() - - defer func() { - smap.mu.Lock() - delete(smap.streams, ss) - smap.mu.Unlock() - cancel() - }() - - } - } - - return prometheus.StreamServerInterceptor(srv, ss, info, handler) - } -} - -type serverStreamWithCtx struct { - grpc.ServerStream - ctx context.Context - cancel *context.CancelFunc -} - -func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx } - -func monitorLeader(s *etcdserver.EtcdServer) *streamsMap { - smap := &streamsMap{ - streams: make(map[grpc.ServerStream]struct{}), - } - - go func() { - election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond - noLeaderCnt := 0 - - for { - select { - case <-s.StopNotify(): - return - case <-time.After(election): - if s.Leader() == types.ID(raft.None) { - noLeaderCnt++ - } else { - noLeaderCnt = 0 - } - - // We are more conservative on canceling existing streams. Reconnecting streams - // cost much more than just rejecting new requests. So we wait until the member - // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams. 
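// For orientation, a minimal gRPC unary interceptor in the spirit of the deleted
// interceptor.go above: reject a call up front when a server-side precondition
// does not hold. The hasLeader func and the Unavailable error are illustrative
// stand-ins, not etcd's actual leader check or rpctypes errors.
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// newUnaryInterceptor fails fast with Unavailable whenever hasLeader reports
// that the cluster currently has no leader; otherwise it runs the handler.
func newUnaryInterceptor(hasLeader func() bool) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		if !hasLeader() {
			return nil, status.Error(codes.Unavailable, "no leader")
		}
		return handler(ctx, req)
	}
}

func main() {
	// Every handler registered on srv passes through the leader check first.
	srv := grpc.NewServer(grpc.UnaryInterceptor(newUnaryInterceptor(func() bool { return true })))
	_ = srv
}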
- if noLeaderCnt >= maxNoLeaderCnt { - smap.mu.Lock() - for ss := range smap.streams { - if ssWithCtx, ok := ss.(serverStreamWithCtx); ok { - (*ssWithCtx.cancel)() - <-ss.Context().Done() - } - } - smap.streams = make(map[grpc.ServerStream]struct{}) - smap.mu.Unlock() - } - } - } - }() - - return smap -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go deleted file mode 100644 index d0220e03a26e3..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3rpc implements etcd v3 RPC system based on gRPC. -package v3rpc - -import ( - "sort" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc") - - // Max operations per txn list. For example, Txn.Success can have at most 128 operations, - // and Txn.Failure can have at most 128 operations. - MaxOpsPerTxn = 128 -) - -type kvServer struct { - hdr header - kv etcdserver.RaftKV -} - -func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return &kvServer{hdr: newHeader(s), kv: s} -} - -func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if err := checkRangeRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Range(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := checkPutRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Put(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - if err := checkDeleteRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.DeleteRange(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := checkTxnRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Txn(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) 
(*pb.CompactionResponse, error) { - resp, err := s.kv.Compact(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func checkRangeRequest(r *pb.RangeRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - return nil -} - -func checkPutRequest(r *pb.PutRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - if r.IgnoreValue && len(r.Value) != 0 { - return rpctypes.ErrGRPCValueProvided - } - if r.IgnoreLease && r.Lease != 0 { - return rpctypes.ErrGRPCLeaseProvided - } - return nil -} - -func checkDeleteRequest(r *pb.DeleteRangeRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - return nil -} - -func checkTxnRequest(r *pb.TxnRequest) error { - if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn { - return rpctypes.ErrGRPCTooManyOps - } - - for _, c := range r.Compare { - if len(c.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - } - - for _, u := range r.Success { - if err := checkRequestOp(u); err != nil { - return err - } - } - if err := checkRequestDupKeys(r.Success); err != nil { - return err - } - - for _, u := range r.Failure { - if err := checkRequestOp(u); err != nil { - return err - } - } - return checkRequestDupKeys(r.Failure) -} - -// checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice -func checkRequestDupKeys(reqs []*pb.RequestOp) error { - // check put overlap - keys := make(map[string]struct{}) - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestPut) - if !ok { - continue - } - preq := tv.RequestPut - if preq == nil { - continue - } - if _, ok := keys[string(preq.Key)]; ok { - return rpctypes.ErrGRPCDuplicateKey - } - keys[string(preq.Key)] = struct{}{} - } - - // no need to check deletes if no puts; delete overlaps are permitted - if len(keys) == 0 { - return nil - } - - // sort keys for range checking - sortedKeys := []string{} - for k := range keys { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - // check put overlap with deletes - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange) - if !ok { - continue - } - dreq := tv.RequestDeleteRange - if dreq == nil { - continue - } - if dreq.RangeEnd == nil { - if _, found := keys[string(dreq.Key)]; found { - return rpctypes.ErrGRPCDuplicateKey - } - } else { - lo := sort.SearchStrings(sortedKeys, string(dreq.Key)) - hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd)) - if lo != hi { - // element between lo and hi => overlap - return rpctypes.ErrGRPCDuplicateKey - } - } - } - - return nil -} - -func checkRequestOp(u *pb.RequestOp) error { - // TODO: ensure only one of the field is set. 
- switch uv := u.Request.(type) { - case *pb.RequestOp_RequestRange: - if uv.RequestRange != nil { - return checkRangeRequest(uv.RequestRange) - } - case *pb.RequestOp_RequestPut: - if uv.RequestPut != nil { - return checkPutRequest(uv.RequestPut) - } - case *pb.RequestOp_RequestDeleteRange: - if uv.RequestDeleteRange != nil { - return checkDeleteRequest(uv.RequestDeleteRange) - } - default: - // empty op / nil entry - return rpctypes.ErrGRPCKeyNotFound - } - return nil -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go deleted file mode 100644 index fd27d10c02b1c..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "io" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "golang.org/x/net/context" -) - -type LeaseServer struct { - hdr header - le etcdserver.Lessor -} - -func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return &LeaseServer{le: s, hdr: newHeader(s)} -} - -func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - resp, err := ls.le.LeaseGrant(ctx, cr) - - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - resp, err := ls.le.LeaseRevoke(ctx, rr) - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil && err != lease.ErrLeaseNotFound { - return nil, togRPCError(err) - } - if err == lease.ErrLeaseNotFound { - resp = &pb.LeaseTimeToLiveResponse{ - Header: &pb.ResponseHeader{}, - ID: rr.ID, - TTL: -1, - } - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) { - errc := make(chan error, 1) - go func() { - errc <- ls.leaseKeepAlive(stream) - }() - select { - case err = <-errc: - case <-stream.Context().Done(): - // the only server-side cancellation is noleader for now. 
- err = stream.Context().Err() - if err == context.Canceled { - err = rpctypes.ErrGRPCNoLeader - } - } - return err -} - -func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - for { - req, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - if isClientCtxErr(stream.Context().Err(), err) { - plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) - } else { - plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) - } - return err - } - - // Create header before we sent out the renew request. - // This can make sure that the revision is strictly smaller or equal to - // when the keepalive happened at the local server (when the local server is the leader) - // or remote leader. - // Without this, a lease might be revoked at rev 3 but client can see the keepalive succeeded - // at rev 4. - resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}} - ls.hdr.fill(resp.Header) - - ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID)) - if err == lease.ErrLeaseNotFound { - err = nil - ttl = 0 - } - - if err != nil { - return togRPCError(err) - } - - resp.TTL = ttl - err = stream.Send(resp) - if err != nil { - if isClientCtxErr(stream.Context().Err(), err) { - plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) - } else { - plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) - } - return err - } - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go deleted file mode 100644 index 3657d03608291..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
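// For orientation, the shape of the keepalive loop in the deleted lease.go above,
// reduced to a stand-in stream interface so it compiles on its own: receive a
// request, renew, answer, and repeat until the client closes the stream.
// keepAliveStream, Request and Response are hypothetical; the real code works
// against the generated pb.Lease_LeaseKeepAliveServer stream.
package main

import (
	"fmt"
	"io"
)

type Request struct{ ID int64 }

type Response struct {
	ID  int64
	TTL int64
}

type keepAliveStream interface {
	Recv() (*Request, error)
	Send(*Response) error
}

// keepAliveLoop mirrors leaseKeepAlive: io.EOF from Recv means the client
// finished cleanly; any other error aborts the stream.
func keepAliveLoop(stream keepAliveStream, renew func(id int64) (int64, error)) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		ttl, err := renew(req.ID)
		if err != nil {
			return err
		}
		if err := stream.Send(&Response{ID: req.ID, TTL: ttl}); err != nil {
			return err
		}
	}
}

// fakeStream is an in-memory stand-in used only to exercise the loop.
type fakeStream struct {
	reqs []*Request
	out  []*Response
}

func (f *fakeStream) Recv() (*Request, error) {
	if len(f.reqs) == 0 {
		return nil, io.EOF
	}
	r := f.reqs[0]
	f.reqs = f.reqs[1:]
	return r, nil
}

func (f *fakeStream) Send(r *Response) error {
	f.out = append(f.out, r)
	return nil
}

func main() {
	fs := &fakeStream{reqs: []*Request{{ID: 1}, {ID: 2}}}
	if err := keepAliveLoop(fs, func(id int64) (int64, error) { return 30, nil }); err != nil {
		fmt.Println("keepalive failed:", err)
	}
	fmt.Println("responses sent:", len(fs.out))
}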
- -package v3rpc - -import ( - "crypto/sha256" - "io" - - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/version" - "golang.org/x/net/context" -) - -type KVGetter interface { - KV() mvcc.ConsistentWatchableKV -} - -type BackendGetter interface { - Backend() backend.Backend -} - -type Alarmer interface { - Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) -} - -type RaftStatusGetter interface { - Index() uint64 - Term() uint64 - Leader() types.ID -} - -type AuthGetter interface { - AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) - AuthStore() auth.AuthStore -} - -type maintenanceServer struct { - rg RaftStatusGetter - kg KVGetter - bg BackendGetter - a Alarmer - hdr header -} - -func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { - srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)} - return &authMaintenanceServer{srv, s} -} - -func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - plog.Noticef("starting to defragment the storage backend...") - err := ms.bg.Backend().Defrag() - if err != nil { - plog.Errorf("failed to defragment the storage backend (%v)", err) - return nil, err - } - plog.Noticef("finished defragmenting the storage backend") - return &pb.DefragmentResponse{}, nil -} - -func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - snap := ms.bg.Backend().Snapshot() - pr, pw := io.Pipe() - - defer pr.Close() - - go func() { - snap.WriteTo(pw) - if err := snap.Close(); err != nil { - plog.Errorf("error closing snapshot (%v)", err) - } - pw.Close() - }() - - // send file data - h := sha256.New() - br := int64(0) - buf := make([]byte, 32*1024) - sz := snap.Size() - for br < sz { - n, err := io.ReadFull(pr, buf) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return togRPCError(err) - } - br += int64(n) - resp := &pb.SnapshotResponse{ - RemainingBytes: uint64(sz - br), - Blob: buf[:n], - } - if err = srv.Send(resp); err != nil { - return togRPCError(err) - } - h.Write(buf[:n]) - } - - // send sha - sha := h.Sum(nil) - hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha} - if err := srv.Send(hresp); err != nil { - return togRPCError(err) - } - - return nil -} - -func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - h, rev, err := ms.kg.KV().Hash() - if err != nil { - return nil, togRPCError(err) - } - resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h} - ms.hdr.fill(resp.Header) - return resp, nil -} - -func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - return ms.a.Alarm(ctx, ar) -} - -func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { - resp := &pb.StatusResponse{ - Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, - Version: version.Version, - DbSize: ms.bg.Backend().Size(), - Leader: uint64(ms.rg.Leader()), - RaftIndex: ms.rg.Index(), - RaftTerm: ms.rg.Term(), - } - ms.hdr.fill(resp.Header) - return resp, nil -} - -type authMaintenanceServer struct { - *maintenanceServer - ag AuthGetter -} - -func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error 
{ - authInfo, err := ams.ag.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return ams.ag.AuthStore().IsAdminPermitted(authInfo) -} - -func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Defragment(ctx, sr) -} - -func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - if err := ams.isAuthenticated(srv.Context()); err != nil { - return err - } - - return ams.maintenanceServer.Snapshot(sr, srv) -} - -func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Hash(ctx, r) -} - -func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { - return ams.maintenanceServer.Status(ctx, ar) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go deleted file mode 100644 index 91a59389b8734..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
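// For orientation, a self-contained sketch of the chunk-then-checksum streaming
// used by the deleted maintenance.go Snapshot handler above: forward the blob in
// fixed-size chunks while hashing it, then send one trailer chunk carrying the
// SHA-256 digest. The send callback stands in for srv.Send on the gRPC stream;
// chunk size and names are illustrative.
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// streamWithChecksum copies r to send in chunks, hashing every byte it
// forwards, and finishes with a final message holding the SHA-256 sum so the
// receiver can verify the whole transfer.
func streamWithChecksum(r io.Reader, chunk int, send func(blob []byte, last bool) error) error {
	h := sha256.New()
	buf := make([]byte, chunk)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			h.Write(buf[:n])
			if serr := send(buf[:n], false); serr != nil {
				return serr
			}
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
	}
	return send(h.Sum(nil), true)
}

func main() {
	data := bytes.Repeat([]byte("etcd"), 10)
	err := streamWithChecksum(bytes.NewReader(data), 16, func(blob []byte, last bool) error {
		if last {
			fmt.Println("sha256 trailer:", hex.EncodeToString(blob))
		} else {
			fmt.Println("chunk bytes:", len(blob))
		}
		return nil
	})
	if err != nil {
		fmt.Println("stream failed:", err)
	}
}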
- -package v3rpc - -import ( - "time" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "golang.org/x/net/context" -) - -type ClusterServer struct { - cluster api.Cluster - server etcdserver.Server - raftTimer etcdserver.RaftTimer -} - -func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer { - return &ClusterServer{ - cluster: s.Cluster(), - server: s, - raftTimer: s, - } -} - -func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - urls, err := types.NewURLs(r.PeerURLs) - if err != nil { - return nil, rpctypes.ErrGRPCMemberBadURLs - } - - now := time.Now() - m := membership.NewMember("", urls, "", &now) - membs, merr := cs.server.AddMember(ctx, *m) - if merr != nil { - return nil, togRPCError(merr) - } - - return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, - Members: membersToProtoMembers(membs), - }, nil -} - -func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - membs, err := cs.server.RemoveMember(ctx, r.ID) - if err != nil { - return nil, togRPCError(err) - } - return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil -} - -func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - m := membership.Member{ - ID: types.ID(r.ID), - RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, - } - membs, err := cs.server.UpdateMember(ctx, m) - if err != nil { - return nil, togRPCError(err) - } - return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil -} - -func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - membs := membersToProtoMembers(cs.cluster.Members()) - return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil -} - -func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} -} - -func membersToProtoMembers(membs []*membership.Member) []*pb.Member { - protoMembs := make([]*pb.Member, len(membs)) - for i := range membs { - protoMembs[i] = &pb.Member{ - Name: membs[i].Name, - ID: uint64(membs[i].ID), - PeerURLs: membs[i].PeerURLs, - ClientURLs: membs[i].ClientURLs, - } - } - return protoMembs -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go deleted file mode 100644 index 6cb41a61e5655..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import "github.com/prometheus/client_golang/prometheus" - -var ( - sentBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "client_grpc_sent_bytes_total", - Help: "The total number of bytes sent to grpc clients.", - }) - - receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "client_grpc_received_bytes_total", - Help: "The total number of bytes received from grpc clients.", - }) -) - -func init() { - prometheus.MustRegister(sentBytes) - prometheus.MustRegister(receivedBytes) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go deleted file mode 100644 index 836f2fd3f4310..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" - "golang.org/x/net/context" -) - -type quotaKVServer struct { - pb.KVServer - qa quotaAlarmer -} - -type quotaAlarmer struct { - q etcdserver.Quota - a Alarmer - id types.ID -} - -// check whether request satisfies the quota. If there is not enough space, -// ignore request and raise the free space alarm. 
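// For orientation, a standalone version of the byte counters defined in the
// deleted metrics.go above, using the same prometheus/client_golang API. The
// counter options are copied from the deleted file; the promhttp exposition in
// main is an illustrative addition, not part of etcd's wiring here.
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
	Namespace: "etcd",
	Subsystem: "network",
	Name:      "client_grpc_sent_bytes_total",
	Help:      "The total number of bytes sent to grpc clients.",
})

func init() {
	prometheus.MustRegister(sentBytes)
}

func main() {
	sentBytes.Add(128) // e.g. called from codec.Marshal with len(b)
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":2112", nil)
}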
-func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { - if qa.q.Available(r) { - return nil - } - req := &pb.AlarmRequest{ - MemberID: uint64(qa.id), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - qa.a.Alarm(ctx, req) - return rpctypes.ErrGRPCNoSpace -} - -func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return "aKVServer{ - NewKVServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, - } -} - -func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := s.qa.check(ctx, r); err != nil { - return nil, err - } - return s.KVServer.Put(ctx, r) -} - -func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := s.qa.check(ctx, r); err != nil { - return nil, err - } - return s.KVServer.Txn(ctx, r) -} - -type quotaLeaseServer struct { - pb.LeaseServer - qa quotaAlarmer -} - -func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - if err := s.qa.check(ctx, cr); err != nil { - return nil, err - } - return s.LeaseServer.LeaseGrant(ctx, cr) -} - -func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return "aLeaseServer{ - NewLeaseServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go deleted file mode 100644 index 7c86999e4c26e..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
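// For orientation, the embed-and-override decorator used by the deleted quota.go
// above, shown with a toy KV interface: the wrapper intercepts the writes it
// cares about with a pre-check, and every other method falls through to the
// embedded value. KV, memKV and quotaKV are hypothetical stand-ins.
package main

import (
	"errors"
	"fmt"
)

var errNoSpace = errors.New("no space")

type KV interface {
	Put(key, val string) error
	Get(key string) (string, error)
}

type memKV struct{ m map[string]string }

func (s *memKV) Put(key, val string) error      { s.m[key] = val; return nil }
func (s *memKV) Get(key string) (string, error) { return s.m[key], nil }

// quotaKV decorates any KV: Put is guarded, Get is inherited via embedding.
type quotaKV struct {
	KV
	available func(key, val string) bool
}

func (q *quotaKV) Put(key, val string) error {
	if !q.available(key, val) {
		return errNoSpace // etcd additionally raises the NOSPACE alarm here
	}
	return q.KV.Put(key, val)
}

func main() {
	kv := &quotaKV{
		KV:        &memKV{m: map[string]string{}},
		available: func(key, val string) bool { return len(val) < 8 },
	}
	fmt.Println(kv.Put("a", "small"))         // <nil>
	fmt.Println(kv.Put("b", "way too large")) // no space
}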
- -package v3rpc - -import ( - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func togRPCError(err error) error { - switch err { - case membership.ErrIDRemoved: - return rpctypes.ErrGRPCMemberNotFound - case membership.ErrIDNotFound: - return rpctypes.ErrGRPCMemberNotFound - case membership.ErrIDExists: - return rpctypes.ErrGRPCMemberExist - case membership.ErrPeerURLexists: - return rpctypes.ErrGRPCPeerURLExist - case etcdserver.ErrNotEnoughStartedMembers: - return rpctypes.ErrMemberNotEnoughStarted - - case mvcc.ErrCompacted: - return rpctypes.ErrGRPCCompacted - case mvcc.ErrFutureRev: - return rpctypes.ErrGRPCFutureRev - case etcdserver.ErrRequestTooLarge: - return rpctypes.ErrGRPCRequestTooLarge - case etcdserver.ErrNoSpace: - return rpctypes.ErrGRPCNoSpace - case etcdserver.ErrTooManyRequests: - return rpctypes.ErrTooManyRequests - - case etcdserver.ErrNoLeader: - return rpctypes.ErrGRPCNoLeader - case etcdserver.ErrStopped: - return rpctypes.ErrGRPCStopped - case etcdserver.ErrTimeout: - return rpctypes.ErrGRPCTimeout - case etcdserver.ErrTimeoutDueToLeaderFail: - return rpctypes.ErrGRPCTimeoutDueToLeaderFail - case etcdserver.ErrTimeoutDueToConnectionLost: - return rpctypes.ErrGRPCTimeoutDueToConnectionLost - case etcdserver.ErrUnhealthy: - return rpctypes.ErrGRPCUnhealthy - case etcdserver.ErrKeyNotFound: - return rpctypes.ErrGRPCKeyNotFound - - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound - case lease.ErrLeaseExists: - return rpctypes.ErrGRPCLeaseExist - - case auth.ErrRootUserNotExist: - return rpctypes.ErrGRPCRootUserNotExist - case auth.ErrRootRoleNotExist: - return rpctypes.ErrGRPCRootRoleNotExist - case auth.ErrUserAlreadyExist: - return rpctypes.ErrGRPCUserAlreadyExist - case auth.ErrUserEmpty: - return rpctypes.ErrGRPCUserEmpty - case auth.ErrUserNotFound: - return rpctypes.ErrGRPCUserNotFound - case auth.ErrRoleAlreadyExist: - return rpctypes.ErrGRPCRoleAlreadyExist - case auth.ErrRoleNotFound: - return rpctypes.ErrGRPCRoleNotFound - case auth.ErrAuthFailed: - return rpctypes.ErrGRPCAuthFailed - case auth.ErrPermissionDenied: - return rpctypes.ErrGRPCPermissionDenied - case auth.ErrRoleNotGranted: - return rpctypes.ErrGRPCRoleNotGranted - case auth.ErrPermissionNotGranted: - return rpctypes.ErrGRPCPermissionNotGranted - case auth.ErrAuthNotEnabled: - return rpctypes.ErrGRPCAuthNotEnabled - case auth.ErrInvalidAuthToken: - return rpctypes.ErrGRPCInvalidAuthToken - case auth.ErrInvalidAuthMgmt: - return rpctypes.ErrGRPCInvalidAuthMgmt - default: - return grpc.Errorf(codes.Unknown, err.Error()) - } -} - -func isClientCtxErr(ctxErr error, err error) bool { - if ctxErr != nil { - return true - } - - ev, ok := status.FromError(err) - if !ok { - return false - } - code := ev.Code() - return code == codes.Canceled || code == codes.DeadlineExceeded -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go deleted file mode 100644 index 301ffbaf47f7c..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this 
file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "io" - "sync" - "time" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type watchServer struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - watchable mvcc.WatchableKV - - ag AuthGetter -} - -func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { - return &watchServer{ - clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), - raftTimer: s, - watchable: s.Watchable(), - ag: s, - } -} - -var ( - // External test can read this with GetProgressReportInterval() - // and change this to a small value to finish fast with - // SetProgressReportInterval(). - progressReportInterval = 10 * time.Minute - progressReportIntervalMu sync.RWMutex -) - -func GetProgressReportInterval() time.Duration { - progressReportIntervalMu.RLock() - defer progressReportIntervalMu.RUnlock() - return progressReportInterval -} - -func SetProgressReportInterval(newTimeout time.Duration) { - progressReportIntervalMu.Lock() - defer progressReportIntervalMu.Unlock() - progressReportInterval = newTimeout -} - -const ( - // We send ctrl response inside the read loop. We do not want - // send to block read, but we still want ctrl response we sent to - // be serialized. Thus we use a buffered chan to solve the problem. - // A small buffer should be OK for most cases, since we expect the - // ctrl requests are infrequent. - ctrlStreamBufLen = 16 -) - -// serverWatchStream is an etcd server side stream. It receives requests -// from client side gRPC stream. It receives watch events from mvcc.WatchStream, -// and creates responses that forwarded to gRPC stream. -// It also forwards control message like watch created and canceled. -type serverWatchStream struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - - watchable mvcc.WatchableKV - - gRPCStream pb.Watch_WatchServer - watchStream mvcc.WatchStream - ctrlStream chan *pb.WatchResponse - - // mu protects progress, prevKV - mu sync.Mutex - // progress tracks the watchID that stream might need to send - // progress to. - // TODO: combine progress and prevKV into a single struct? - progress map[mvcc.WatchID]bool - prevKV map[mvcc.WatchID]bool - - // closec indicates the stream is closed. - closec chan struct{} - - // wg waits for the send loop to complete - wg sync.WaitGroup - - ag AuthGetter -} - -func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { - sws := serverWatchStream{ - clusterID: ws.clusterID, - memberID: ws.memberID, - raftTimer: ws.raftTimer, - - watchable: ws.watchable, - - gRPCStream: stream, - watchStream: ws.watchable.NewWatchStream(), - // chan for sending control response like watcher created and canceled. 
- ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), - progress: make(map[mvcc.WatchID]bool), - prevKV: make(map[mvcc.WatchID]bool), - closec: make(chan struct{}), - - ag: ws.ag, - } - - sws.wg.Add(1) - go func() { - sws.sendLoop() - sws.wg.Done() - }() - - errc := make(chan error, 1) - // Ideally recvLoop would also use sws.wg to signal its completion - // but when stream.Context().Done() is closed, the stream's recv - // may continue to block since it uses a different context, leading to - // deadlock when calling sws.close(). - go func() { - if rerr := sws.recvLoop(); rerr != nil { - if isClientCtxErr(stream.Context().Err(), rerr) { - plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) - } else { - plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) - } - errc <- rerr - } - }() - select { - case err = <-errc: - close(sws.ctrlStream) - case <-stream.Context().Done(): - err = stream.Context().Err() - // the only server-side cancellation is noleader for now. - if err == context.Canceled { - err = rpctypes.ErrGRPCNoLeader - } - } - sws.close() - return err -} - -func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { - authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) - if err != nil { - return false - } - if authInfo == nil { - // if auth is enabled, IsRangePermitted() can cause an error - authInfo = &auth.AuthInfo{} - } - - return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil -} - -func (sws *serverWatchStream) recvLoop() error { - for { - req, err := sws.gRPCStream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - switch uv := req.RequestUnion.(type) { - case *pb.WatchRequest_CreateRequest: - if uv.CreateRequest == nil { - break - } - - creq := uv.CreateRequest - if len(creq.Key) == 0 { - // \x00 is the smallest key - creq.Key = []byte{0} - } - if len(creq.RangeEnd) == 0 { - // force nil since watchstream.Watch distinguishes - // between nil and []byte{} for single key / >= - creq.RangeEnd = nil - } - if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { - // support >= key queries - creq.RangeEnd = []byte{} - } - - if !sws.isWatchPermitted(creq) { - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: -1, - Canceled: true, - Created: true, - CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), - } - - select { - case sws.ctrlStream <- wr: - case <-sws.closec: - } - return nil - } - - filters := FiltersFromRequest(creq) - - wsrev := sws.watchStream.Rev() - rev := creq.StartRevision - if rev == 0 { - rev = wsrev + 1 - } - id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...) 
- if id != -1 { - sws.mu.Lock() - if creq.ProgressNotify { - sws.progress[id] = true - } - if creq.PrevKv { - sws.prevKV[id] = true - } - sws.mu.Unlock() - } - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(wsrev), - WatchId: int64(id), - Created: true, - Canceled: id == -1, - } - select { - case sws.ctrlStream <- wr: - case <-sws.closec: - return nil - } - case *pb.WatchRequest_CancelRequest: - if uv.CancelRequest != nil { - id := uv.CancelRequest.WatchId - err := sws.watchStream.Cancel(mvcc.WatchID(id)) - if err == nil { - sws.ctrlStream <- &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: id, - Canceled: true, - } - sws.mu.Lock() - delete(sws.progress, mvcc.WatchID(id)) - delete(sws.prevKV, mvcc.WatchID(id)) - sws.mu.Unlock() - } - } - default: - // we probably should not shutdown the entire stream when - // receive an valid command. - // so just do nothing instead. - continue - } - } -} - -func (sws *serverWatchStream) sendLoop() { - // watch ids that are currently active - ids := make(map[mvcc.WatchID]struct{}) - // watch responses pending on a watch id creation message - pending := make(map[mvcc.WatchID][]*pb.WatchResponse) - - interval := GetProgressReportInterval() - progressTicker := time.NewTicker(interval) - - defer func() { - progressTicker.Stop() - // drain the chan to clean up pending events - for ws := range sws.watchStream.Chan() { - mvcc.ReportEventReceived(len(ws.Events)) - } - for _, wrs := range pending { - for _, ws := range wrs { - mvcc.ReportEventReceived(len(ws.Events)) - } - } - }() - - for { - select { - case wresp, ok := <-sws.watchStream.Chan(): - if !ok { - return - } - - // TODO: evs is []mvccpb.Event type - // either return []*mvccpb.Event from the mvcc package - // or define protocol buffer with []mvccpb.Event. 
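// For orientation, a reduced model of the send-loop bookkeeping in the deleted
// watch.go: events for a watch ID are buffered until the "created" control
// message for that ID has been sent, then flushed in order. The dispatcher type
// and its send callback are hypothetical stand-ins for serverWatchStream.
package main

import "fmt"

type WatchID int64

type event struct {
	id  WatchID
	msg string
}

type dispatcher struct {
	announced map[WatchID]bool    // IDs whose creation has been sent to the client
	pending   map[WatchID][]event // events held back until the ID is announced
	send      func(event)
}

// deliver forwards an event immediately if its watch ID is announced,
// otherwise it buffers the event.
func (d *dispatcher) deliver(ev event) {
	if !d.announced[ev.id] {
		d.pending[ev.id] = append(d.pending[ev.id], ev)
		return
	}
	d.send(ev)
}

// created marks the ID as announced and flushes anything buffered for it.
func (d *dispatcher) created(id WatchID) {
	d.announced[id] = true
	for _, ev := range d.pending[id] {
		d.send(ev)
	}
	delete(d.pending, id)
}

func main() {
	d := &dispatcher{
		announced: map[WatchID]bool{},
		pending:   map[WatchID][]event{},
		send:      func(ev event) { fmt.Printf("send id=%d %s\n", ev.id, ev.msg) },
	}
	d.deliver(event{id: 1, msg: "early event"}) // buffered
	d.created(1)                                // flushes the buffered event
	d.deliver(event{id: 1, msg: "live event"})  // sent immediately
}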
- evs := wresp.Events - events := make([]*mvccpb.Event, len(evs)) - sws.mu.Lock() - needPrevKV := sws.prevKV[wresp.WatchID] - sws.mu.Unlock() - for i := range evs { - events[i] = &evs[i] - - if needPrevKV { - opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} - r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt) - if err == nil && len(r.KVs) != 0 { - events[i].PrevKv = &(r.KVs[0]) - } - } - } - - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(wresp.Revision), - WatchId: int64(wresp.WatchID), - Events: events, - CompactRevision: wresp.CompactRevision, - } - - if _, hasId := ids[wresp.WatchID]; !hasId { - // buffer if id not yet announced - wrs := append(pending[wresp.WatchID], wr) - pending[wresp.WatchID] = wrs - continue - } - - mvcc.ReportEventReceived(len(evs)) - if err := sws.gRPCStream.Send(wr); err != nil { - if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error()) - } else { - plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error()) - } - return - } - - sws.mu.Lock() - if len(evs) > 0 && sws.progress[wresp.WatchID] { - // elide next progress update if sent a key update - sws.progress[wresp.WatchID] = false - } - sws.mu.Unlock() - - case c, ok := <-sws.ctrlStream: - if !ok { - return - } - - if err := sws.gRPCStream.Send(c); err != nil { - if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error()) - } else { - plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error()) - } - return - } - - // track id creation - wid := mvcc.WatchID(c.WatchId) - if c.Canceled { - delete(ids, wid) - continue - } - if c.Created { - // flush buffered events - ids[wid] = struct{}{} - for _, v := range pending[wid] { - mvcc.ReportEventReceived(len(v.Events)) - if err := sws.gRPCStream.Send(v); err != nil { - if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) - } else { - plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error()) - } - return - } - } - delete(pending, wid) - } - case <-progressTicker.C: - sws.mu.Lock() - for id, ok := range sws.progress { - if ok { - sws.watchStream.RequestProgress(id) - } - sws.progress[id] = true - } - sws.mu.Unlock() - case <-sws.closec: - return - } - } -} - -func (sws *serverWatchStream) close() { - sws.watchStream.Close() - close(sws.closec) - sws.wg.Wait() -} - -func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(sws.clusterID), - MemberId: uint64(sws.memberID), - Revision: rev, - RaftTerm: sws.raftTimer.Term(), - } -} - -func filterNoDelete(e mvccpb.Event) bool { - return e.Type == mvccpb.DELETE -} - -func filterNoPut(e mvccpb.Event) bool { - return e.Type == mvccpb.PUT -} - -func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc { - filters := make([]mvcc.FilterFunc, 0, len(creq.Filters)) - for _, ft := range creq.Filters { - switch ft { - case pb.WatchCreateRequest_NOPUT: - filters = append(filters, filterNoPut) - case pb.WatchCreateRequest_NODELETE: - filters = append(filters, filterNoDelete) - default: - } - } - return filters -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go deleted file mode 100644 index 0be93c52b6f44..0000000000000 --- 
a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "bytes" - "sort" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/types" - "github.com/gogo/protobuf/proto" - "golang.org/x/net/context" -) - -const ( - warnApplyDuration = 100 * time.Millisecond -) - -type applyResult struct { - resp proto.Message - err error - // physc signals the physical effect of the request has completed in addition - // to being logically reflected by the node. Currently only used for - // Compaction requests. - physc <-chan struct{} -} - -// applierV3 is the interface for processing V3 raft messages -type applierV3 interface { - Apply(r *pb.InternalRaftRequest) *applyResult - - Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) - Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) - Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) - - LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) - - Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) - - AuthEnable() (*pb.AuthEnableResponse, error) - AuthDisable() (*pb.AuthDisableResponse, error) - - UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) -} - -type applierV3backend struct { - s *EtcdServer -} - -func (s *EtcdServer) newApplierV3() applierV3 { - return newAuthApplierV3( - s.AuthStore(), - newQuotaApplierV3(s, &applierV3backend{s}), - ) -} - -func (a 
*applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { - ar := &applyResult{} - - // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls - switch { - case r.Range != nil: - ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) - case r.Put != nil: - ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) - case r.DeleteRange != nil: - ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) - case r.Txn != nil: - ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) - case r.Compaction != nil: - ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction) - case r.LeaseGrant != nil: - ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) - case r.LeaseRevoke != nil: - ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) - case r.Alarm != nil: - ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) - case r.Authenticate != nil: - ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) - case r.AuthEnable != nil: - ar.resp, ar.err = a.s.applyV3.AuthEnable() - case r.AuthDisable != nil: - ar.resp, ar.err = a.s.applyV3.AuthDisable() - case r.AuthUserAdd != nil: - ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) - case r.AuthUserDelete != nil: - ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) - case r.AuthUserChangePassword != nil: - ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) - case r.AuthUserGrantRole != nil: - ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) - case r.AuthUserGet != nil: - ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) - case r.AuthUserRevokeRole != nil: - ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) - case r.AuthRoleAdd != nil: - ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) - case r.AuthRoleGrantPermission != nil: - ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) - case r.AuthRoleGet != nil: - ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) - case r.AuthRoleRevokePermission != nil: - ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) - case r.AuthRoleDelete != nil: - ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) - case r.AuthUserList != nil: - ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) - case r.AuthRoleList != nil: - ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) - default: - panic("not implemented") - } - return ar -} - -func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { - resp = &pb.PutResponse{} - resp.Header = &pb.ResponseHeader{} - - val, leaseID := p.Value, lease.LeaseID(p.Lease) - if txn == nil { - if leaseID != lease.NoLease { - if l := a.s.lessor.Lookup(leaseID); l == nil { - return nil, lease.ErrLeaseNotFound - } - } - txn = a.s.KV().Write() - defer txn.End() - } - - var rr *mvcc.RangeResult - if p.IgnoreValue || p.IgnoreLease || p.PrevKv { - rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - if p.IgnoreValue || p.IgnoreLease { - if rr == nil || len(rr.KVs) == 0 { - // ignore_{lease,value} flag expects previous key-value pair - return nil, ErrKeyNotFound - } - } - if p.IgnoreValue { - val = rr.KVs[0].Value - } - if p.IgnoreLease { - leaseID = lease.LeaseID(rr.KVs[0].Lease) - } - if p.PrevKv { - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] - } - } - - resp.Header.Revision = txn.Put(p.Key, val, leaseID) - return resp, nil -} - -func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, 
error) { - resp := &pb.DeleteRangeResponse{} - resp.Header = &pb.ResponseHeader{} - - if txn == nil { - txn = a.s.kv.Write() - defer txn.End() - } - - if isGteRange(dr.RangeEnd) { - dr.RangeEnd = []byte{} - } - - if dr.PrevKv { - rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - if rr != nil { - for i := range rr.KVs { - resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) - } - } - } - - resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) - return resp, nil -} - -func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - resp := &pb.RangeResponse{} - resp.Header = &pb.ResponseHeader{} - - if txn == nil { - txn = a.s.kv.Read() - defer txn.End() - } - - if isGteRange(r.RangeEnd) { - r.RangeEnd = []byte{} - } - - limit := r.Limit - if r.SortOrder != pb.RangeRequest_NONE || - r.MinModRevision != 0 || r.MaxModRevision != 0 || - r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { - // fetch everything; sort and truncate afterwards - limit = 0 - } - if limit > 0 { - // fetch one extra for 'more' flag - limit = limit + 1 - } - - ro := mvcc.RangeOptions{ - Limit: limit, - Rev: r.Revision, - Count: r.CountOnly, - } - - rr, err := txn.Range(r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } - - if r.MaxModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } - pruneKVs(rr, f) - } - if r.MinModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } - pruneKVs(rr, f) - } - if r.MaxCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } - pruneKVs(rr, f) - } - if r.MinCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } - pruneKVs(rr, f) - } - - sortOrder := r.SortOrder - if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { - // Since current mvcc.Range implementation returns results - // sorted by keys in lexiographically ascending order, - // sort ASCEND by default only when target is not 'KEY' - sortOrder = pb.RangeRequest_ASCEND - } - if sortOrder != pb.RangeRequest_NONE { - var sorter sort.Interface - switch { - case r.SortTarget == pb.RangeRequest_KEY: - sorter = &kvSortByKey{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VERSION: - sorter = &kvSortByVersion{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_CREATE: - sorter = &kvSortByCreate{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_MOD: - sorter = &kvSortByMod{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VALUE: - sorter = &kvSortByValue{&kvSort{rr.KVs}} - } - switch { - case sortOrder == pb.RangeRequest_ASCEND: - sort.Sort(sorter) - case sortOrder == pb.RangeRequest_DESCEND: - sort.Sort(sort.Reverse(sorter)) - } - } - - if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { - rr.KVs = rr.KVs[:r.Limit] - resp.More = true - } - - resp.Header.Revision = rr.Rev - resp.Count = int64(rr.Count) - for i := range rr.KVs { - if r.KeysOnly { - rr.KVs[i].Value = nil - } - resp.Kvs = append(resp.Kvs, &rr.KVs[i]) - } - return resp, nil -} - -func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - isWrite := !isTxnReadonly(rt) - txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) - - reqs, ok := a.compareToOps(txn, rt) - if isWrite { - if err := a.checkRequestPut(txn, reqs); err != nil { - txn.End() - return nil, err - } - } - if err := checkRequestRange(txn, 
reqs); err != nil { - txn.End() - return nil, err - } - - resps := make([]*pb.ResponseOp, len(reqs)) - txnResp := &pb.TxnResponse{ - Responses: resps, - Succeeded: ok, - Header: &pb.ResponseHeader{}, - } - - // When executing mutable txn ops, etcd must hold the txn lock so - // readers do not see any intermediate results. Since writes are - // serialized on the raft loop, the revision in the read view will - // be the revision of the write txn. - if isWrite { - txn.End() - txn = a.s.KV().Write() - } - for i := range reqs { - resps[i] = a.applyUnion(txn, reqs[i]) - } - rev := txn.Rev() - if len(txn.Changes()) != 0 { - rev++ - } - txn.End() - - txnResp.Header.Revision = rev - return txnResp, nil -} - -func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) { - for _, c := range rt.Compare { - if !applyCompare(rv, c) { - return rt.Failure, false - } - } - return rt.Success, true -} - -// applyCompare applies the compare request. -// If the comparison succeeds, it returns true. Otherwise, returns false. -func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { - rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return false - } - var ckv mvccpb.KeyValue - if len(rr.KVs) != 0 { - ckv = rr.KVs[0] - } else { - // Use the zero value of ckv normally. However... - if c.Target == pb.Compare_VALUE { - // Always fail if we're comparing a value on a key that doesn't exist. - // We can treat non-existence as the empty set explicitly, such that - // even a key with a value of length 0 bytes is still a real key - // that was written that way - return false - } - } - - // -1 is less, 0 is equal, 1 is greater - var result int - switch c.Target { - case pb.Compare_VALUE: - tv, _ := c.TargetUnion.(*pb.Compare_Value) - if tv != nil { - result = bytes.Compare(ckv.Value, tv.Value) - } - case pb.Compare_CREATE: - tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision) - if tv != nil { - result = compareInt64(ckv.CreateRevision, tv.CreateRevision) - } - - case pb.Compare_MOD: - tv, _ := c.TargetUnion.(*pb.Compare_ModRevision) - if tv != nil { - result = compareInt64(ckv.ModRevision, tv.ModRevision) - } - case pb.Compare_VERSION: - tv, _ := c.TargetUnion.(*pb.Compare_Version) - if tv != nil { - result = compareInt64(ckv.Version, tv.Version) - } - } - - switch c.Result { - case pb.Compare_EQUAL: - return result == 0 - case pb.Compare_NOT_EQUAL: - return result != 0 - case pb.Compare_GREATER: - return result > 0 - case pb.Compare_LESS: - return result < 0 - } - return true -} - -func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { - switch tv := union.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange != nil { - resp, err := a.Range(txn, tv.RequestRange) - if err != nil { - plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}} - } - case *pb.RequestOp_RequestPut: - if tv.RequestPut != nil { - resp, err := a.Put(txn, tv.RequestPut) - if err != nil { - plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}} - } - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange != nil { - resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) - if err != nil { - plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}} - } - default: - 
// empty union - return nil - } - return nil - -} - -func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) { - resp := &pb.CompactionResponse{} - resp.Header = &pb.ResponseHeader{} - ch, err := a.s.KV().Compact(compaction.Revision) - if err != nil { - return nil, ch, err - } - // get the current revision. which key to get is not important. - rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{}) - resp.Header.Revision = rr.Rev - return resp, ch, err -} - -func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) - resp := &pb.LeaseGrantResponse{} - if err == nil { - resp.ID = int64(l.ID) - resp.TTL = l.TTL() - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) - return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err -} - -func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp := &pb.AlarmResponse{} - oldCount := len(a.s.alarmStore.Get(ar.Alarm)) - - switch ar.Action { - case pb.AlarmRequest_GET: - resp.Alarms = a.s.alarmStore.Get(ar.Alarm) - case pb.AlarmRequest_ACTIVATE: - m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1 - if !activated { - break - } - - switch m.Alarm { - case pb.AlarmType_NOSPACE: - plog.Warningf("alarm raised %+v", m) - a.s.applyV3 = newApplierV3Capped(a) - default: - plog.Errorf("unimplemented alarm activation (%+v)", m) - } - case pb.AlarmRequest_DEACTIVATE: - m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0 - if !deactivated { - break - } - - switch m.Alarm { - case pb.AlarmType_NOSPACE: - plog.Infof("alarm disarmed %+v", ar) - a.s.applyV3 = a.s.newApplierV3() - default: - plog.Errorf("unimplemented alarm deactivation (%+v)", m) - } - default: - return nil, nil - } - return resp, nil -} - -type applierV3Capped struct { - applierV3 - q backendQuota -} - -// newApplierV3Capped creates an applyV3 that will reject Puts and transactions -// with Puts so that the number of keys in the store is capped. 
-func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } - -func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { - return nil, ErrNoSpace -} - -func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) { - if a.q.Cost(r) > 0 { - return nil, ErrNoSpace - } - return a.applierV3.Txn(r) -} - -func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, ErrNoSpace -} - -func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { - err := a.s.AuthStore().AuthEnable() - if err != nil { - return nil, err - } - return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { - a.s.AuthStore().AuthDisable() - return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) - resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := a.s.AuthStore().UserAdd(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := a.s.AuthStore().UserDelete(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := a.s.AuthStore().UserChangePassword(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := a.s.AuthStore().UserGrantRole(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := a.s.AuthStore().UserGet(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := a.s.AuthStore().UserRevokeRole(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := a.s.AuthStore().RoleAdd(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := a.s.AuthStore().RoleGrantPermission(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := a.s.AuthStore().RoleGet(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err 
:= a.s.AuthStore().RoleRevokePermission(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := a.s.AuthStore().RoleDelete(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := a.s.AuthStore().UserList(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := a.s.AuthStore().RoleList(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -type quotaApplierV3 struct { - applierV3 - q Quota -} - -func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { - return &quotaApplierV3{app, NewBackendQuota(s)} -} - -func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { - ok := a.q.Available(p) - resp, err := a.applierV3.Put(txn, p) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - ok := a.q.Available(rt) - resp, err := a.applierV3.Txn(rt) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - ok := a.q.Available(lc) - resp, err := a.applierV3.LeaseGrant(lc) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -type kvSort struct{ kvs []mvccpb.KeyValue } - -func (s *kvSort) Swap(i, j int) { - t := s.kvs[i] - s.kvs[i] = s.kvs[j] - s.kvs[j] = t -} -func (s *kvSort) Len() int { return len(s.kvs) } - -type kvSortByKey struct{ *kvSort } - -func (s *kvSortByKey) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0 -} - -type kvSortByVersion struct{ *kvSort } - -func (s *kvSortByVersion) Less(i, j int) bool { - return (s.kvs[i].Version - s.kvs[j].Version) < 0 -} - -type kvSortByCreate struct{ *kvSort } - -func (s *kvSortByCreate) Less(i, j int) bool { - return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0 -} - -type kvSortByMod struct{ *kvSort } - -func (s *kvSortByMod) Less(i, j int) bool { - return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 -} - -type kvSortByValue struct{ *kvSort } - -func (s *kvSortByValue) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 -} - -func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestPut) - if !ok { - continue - } - preq := tv.RequestPut - if preq == nil { - continue - } - if preq.IgnoreValue || preq.IgnoreLease { - // expects previous key-value, error if not exist - rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return err - } - if rr == nil || len(rr.KVs) == 0 { - return ErrKeyNotFound - } - } - if lease.LeaseID(preq.Lease) == lease.NoLease { - continue - } - if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { - return lease.ErrLeaseNotFound - } - } - return nil -} - -func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestRange) - if !ok { - continue - } - greq := tv.RequestRange - if greq == nil || greq.Revision == 0 { - continue - } - -
if greq.Revision > rv.Rev() { - return mvcc.ErrFutureRev - } - if greq.Revision < rv.FirstRev() { - return mvcc.ErrCompacted - } - } - return nil -} - -func compareInt64(a, b int64) int { - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } -} - -// isGteRange determines if the range end is a >= range. This works around grpc -// sending empty byte strings as nil; >= is encoded in the range end as '\0'. -func isGteRange(rangeEnd []byte) bool { - return len(rangeEnd) == 1 && rangeEnd[0] == 0 -} - -func noSideEffect(r *pb.InternalRaftRequest) bool { - return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil -} - -func removeNeedlessRangeReqs(txn *pb.TxnRequest) { - f := func(ops []*pb.RequestOp) []*pb.RequestOp { - j := 0 - for i := 0; i < len(ops); i++ { - if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { - continue - } - ops[j] = ops[i] - j++ - } - - return ops[:j] - } - - txn.Success = f(txn.Success) - txn.Failure = f(txn.Failure) -} - -func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { - j := 0 - for i := range rr.KVs { - rr.KVs[j] = rr.KVs[i] - if !isPrunable(&rr.KVs[i]) { - j++ - } - } - rr.KVs = rr.KVs[:j] -} - -func newHeader(s *EtcdServer) *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(s.Cluster().ID()), - MemberId: uint64(s.ID()), - Revision: s.KV().Rev(), - RaftTerm: s.Term(), - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go deleted file mode 100644 index 7da4ae45df5d1..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "sync" - - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" -) - -type authApplierV3 struct { - applierV3 - as auth.AuthStore - - // mu serializes Apply so that user isn't corrupted and so that - // serialized requests don't leak data from TOCTOU errors - mu sync.Mutex - - authInfo auth.AuthInfo -} - -func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 { - return &authApplierV3{applierV3: base, as: as} -} - -func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { - aa.mu.Lock() - defer aa.mu.Unlock() - if r.Header != nil { - // backward-compatible with pre-3.0 releases when internalRaftRequest - // does not have header field - aa.authInfo.Username = r.Header.Username - aa.authInfo.Revision = r.Header.AuthRevision - } - if needAdminPermission(r) { - if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return &applyResult{err: err} - } - } - ret := aa.applierV3.Apply(r) - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return ret -} - -func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { - return nil, err - } - if r.PrevKv { - err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil) - if err != nil { - return nil, err - } - } - return aa.applierV3.Put(txn, r) -} - -func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err - } - return aa.applierV3.Range(txn, r) -} - -func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err - } - if r.PrevKv { - err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd) - if err != nil { - return nil, err - } - } - - return aa.applierV3.DeleteRange(txn, r) -} - -func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - switch tv := requ.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange == nil { - continue - } - - if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { - return err - } - - case *pb.RequestOp_RequestPut: - if tv.RequestPut == nil { - continue - } - - if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { - return err - } - - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange == nil { - continue - } - - if tv.RequestDeleteRange.PrevKv { - err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - - err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - } - - return nil -} - -func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { - for _, c := range rt.Compare { - if err := as.IsRangePermitted(ai, c.Key, nil); err != nil { - return err - } - } - if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { - return err - } - if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil { - return err - } - return nil -} - -func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, 
error) { - if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil { - return nil, err - } - return aa.applierV3.Txn(rt) -} - -func needAdminPermission(r *pb.InternalRaftRequest) bool { - switch { - case r.AuthEnable != nil: - return true - case r.AuthDisable != nil: - return true - case r.AuthUserAdd != nil: - return true - case r.AuthUserDelete != nil: - return true - case r.AuthUserChangePassword != nil: - return true - case r.AuthUserGrantRole != nil: - return true - case r.AuthUserGet != nil: - return true - case r.AuthUserRevokeRole != nil: - return true - case r.AuthRoleAdd != nil: - return true - case r.AuthRoleGrantPermission != nil: - return true - case r.AuthRoleGet != nil: - return true - case r.AuthRoleRevokePermission != nil: - return true - case r.AuthRoleDelete != nil: - return true - case r.AuthUserList != nil: - return true - case r.AuthRoleList != nil: - return true - default: - return false - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go deleted file mode 100644 index f278efca88e57..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "encoding/json" - "path" - "time" - - "github.com/coreos/etcd/etcdserver/api" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/store" - "github.com/coreos/go-semver/semver" -) - -// ApplierV2 is the interface for processing V2 raft messages -type ApplierV2 interface { - Delete(r *pb.Request) Response - Post(r *pb.Request) Response - Put(r *pb.Request) Response - QGet(r *pb.Request) Response - Sync(r *pb.Request) Response -} - -func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 { - return &applierV2store{store: s, cluster: c} -} - -type applierV2store struct { - store store.Store - cluster *membership.RaftCluster -} - -func (a *applierV2store) Delete(r *pb.Request) Response { - switch { - case r.PrevIndex > 0 || r.PrevValue != "": - return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex)) - default: - return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive)) - } -} - -func (a *applierV2store) Post(r *pb.Request) Response { - return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r))) -} - -func (a *applierV2store) Put(r *pb.Request) Response { - ttlOptions := toTTLOptions(r) - exists, existsSet := pbutil.GetBool(r.PrevExist) - switch { - case existsSet: - if exists { - if r.PrevIndex == 0 && r.PrevValue == "" { - return toResponse(a.store.Update(r.Path, r.Val, ttlOptions)) - } - return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) - } - return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions)) - case r.PrevIndex > 0 || r.PrevValue != "": - return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) - default: - if storeMemberAttributeRegexp.MatchString(r.Path) { - id := membership.MustParseMemberIDFromKey(path.Dir(r.Path)) - var attr membership.Attributes - if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { - plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) - } - if a.cluster != nil { - a.cluster.UpdateAttributes(id, attr) - } - // return an empty response since there is no consumer. - return Response{} - } - if r.Path == membership.StoreClusterVersionKey() { - if a.cluster != nil { - a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability) - } - // return an empty response since there is no consumer. 
- return Response{} - } - return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions)) - } -} - -func (a *applierV2store) QGet(r *pb.Request) Response { - return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted)) -} - -func (a *applierV2store) Sync(r *pb.Request) Response { - a.store.DeleteExpiredKeys(time.Unix(0, r.Time)) - return Response{} -} - -// applyV2Request interprets r as a call to store.X and returns a Response interpreted -// from store.Event -func (s *EtcdServer) applyV2Request(r *pb.Request) Response { - toTTLOptions(r) - switch r.Method { - case "POST": - return s.applyV2.Post(r) - case "PUT": - return s.applyV2.Put(r) - case "DELETE": - return s.applyV2.Delete(r) - case "QGET": - return s.applyV2.QGet(r) - case "SYNC": - return s.applyV2.Sync(r) - default: - // This should never be reached, but just in case: - return Response{err: ErrUnknownMethod} - } -} - -func toTTLOptions(r *pb.Request) store.TTLOptionSet { - refresh, _ := pbutil.GetBool(r.Refresh) - ttlOptions := store.TTLOptionSet{Refresh: refresh} - if r.Expiration != 0 { - ttlOptions.ExpireTime = time.Unix(0, r.Expiration) - } - return ttlOptions -} - -func toResponse(ev *store.Event, err error) Response { - return Response{Event: ev, err: err} -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go deleted file mode 100644 index c5e2dabf3e712..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/backend.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "fmt" - "os" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" -) - -func newBackend(cfg *ServerConfig) backend.Backend { - bcfg := backend.DefaultBackendConfig() - bcfg.Path = cfg.backendPath() - if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { - // permit 10% excess over quota for disarm - bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) - } - return backend.New(bcfg) -} - -// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. -func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { - snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) - if err != nil { - return nil, fmt.Errorf("database snapshot file path error: %v", err) - } - if err := os.Rename(snapPath, cfg.backendPath()); err != nil { - return nil, fmt.Errorf("rename snapshot file error: %v", err) - } - return openBackend(cfg), nil -} - -// openBackend returns a backend using the current etcd db. 
-func openBackend(cfg *ServerConfig) backend.Backend { - fn := cfg.backendPath() - beOpened := make(chan backend.Backend) - go func() { - beOpened <- newBackend(cfg) - }() - select { - case be := <-beOpened: - return be - case <-time.After(time.Second): - plog.Warningf("another etcd process is using %q and holds the file lock.", fn) - plog.Warningf("waiting for it to exit before starting...") - } - return <-beOpened -} - -// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes -// before updating the backend db after persisting raft snapshot to disk, -// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this -// case, replace the db with the snapshot db sent by the leader. -func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { - var cIndex consistentIndex - kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) - defer kv.Close() - if snapshot.Metadata.Index <= kv.ConsistentIndex() { - return oldbe, nil - } - oldbe.Close() - return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go deleted file mode 100644 index f44862a46380e..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "sort" - "time" - - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" -) - -// isMemberBootstrapped tries to check if the given member has been bootstrapped -// in the given cluster. -func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { - rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt) - if err != nil { - return false - } - id := cl.MemberByName(member).ID - m := rcl.Member(id) - if m == nil { - return false - } - if len(m.ClientURLs) > 0 { - return true - } - return false -} - -// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and -// attempts to construct a Cluster by accessing the members endpoint on one of -// these URLs. The first URL to provide a response is used. If no URLs provide -// a response, or a Cluster cannot be successfully created from a received -// response, an error is returned. -// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, -// 10 second is enough for building connection and finishing request. -func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { - return getClusterFromRemotePeers(urls, 10*time.Second, true, rt) -} - -// If logerr is true, it prints out more error messages. 
-func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { - cc := &http.Client{ - Transport: rt, - Timeout: timeout, - } - for _, u := range urls { - resp, err := cc.Get(u + "/members") - if err != nil { - if logerr { - plog.Warningf("could not get cluster response from %s: %v", u, err) - } - continue - } - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - if logerr { - plog.Warningf("could not read the body of cluster response: %v", err) - } - continue - } - var membs []*membership.Member - if err = json.Unmarshal(b, &membs); err != nil { - if logerr { - plog.Warningf("could not unmarshal cluster response: %v", err) - } - continue - } - id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) - if err != nil { - if logerr { - plog.Warningf("could not parse the cluster ID from cluster res: %v", err) - } - continue - } - - // check the length of membership members - // if the membership members are present then prepare and return raft cluster - // if membership members are not present then the raft cluster formed will be - // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error - if len(membs) > 0 { - return membership.NewClusterFromMembers("", id, membs), nil - } - - return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.") - } - return nil, fmt.Errorf("could not retrieve cluster information from the given urls") -} - -// getRemotePeerURLs returns peer urls of remote members in the cluster. The -// returned list is sorted in ascending lexicographical order. -func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { - us := make([]string, 0) - for _, m := range cl.Members() { - if m.Name == local { - continue - } - us = append(us, m.PeerURLs...) - } - sort.Strings(us) - return us -} - -// getVersions returns the versions of the members in the given cluster. -// The key of the returned map is the member's ID. The value of the returned map -// is the semver versions string, including server and cluster. -// If it fails to get the version of a member, the key will be nil. -func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { - members := cl.Members() - vers := make(map[string]*version.Versions) - for _, m := range members { - if m.ID == local { - cv := "not_decided" - if cl.Version() != nil { - cv = cl.Version().String() - } - vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} - continue - } - ver, err := getVersion(m, rt) - if err != nil { - plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) - vers[m.ID.String()] = nil - } else { - vers[m.ID.String()] = ver - } - } - return vers -} - -// decideClusterVersion decides the cluster version based on the versions map. -// The returned version is the min server version in the map, or nil if the min -// version in unknown. 
-func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { - var cv *semver.Version - lv := semver.Must(semver.NewVersion(version.Version)) - - for mid, ver := range vers { - if ver == nil { - return nil - } - v, err := semver.NewVersion(ver.Server) - if err != nil { - plog.Errorf("cannot understand the version of member %s (%v)", mid, err) - return nil - } - if lv.LessThan(*v) { - plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) - plog.Warningf("member %s has a higher version %s", mid, ver.Server) - } - if cv == nil { - cv = v - } else if v.LessThan(*cv) { - cv = v - } - } - return cv -} - -// isCompatibleWithCluster return true if the local member has a compatible version with -// the current running cluster. -// The version is considered as compatible when at least one of the other members in the cluster has a -// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version -// out of the range. -// We set this rule since when the local member joins, another member might be offline. -func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { - vers := getVersions(cl, local, rt) - minV := semver.Must(semver.NewVersion(version.MinClusterVersion)) - maxV := semver.Must(semver.NewVersion(version.Version)) - maxV = &semver.Version{ - Major: maxV.Major, - Minor: maxV.Minor, - } - - return isCompatibleWithVers(vers, local, minV, maxV) -} - -func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { - var ok bool - for id, v := range vers { - // ignore comparison with local version - if id == local.String() { - continue - } - if v == nil { - continue - } - clusterv, err := semver.NewVersion(v.Cluster) - if err != nil { - plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) - continue - } - if clusterv.LessThan(*minV) { - plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String()) - return false - } - if maxV.LessThan(*clusterv) { - plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) - return false - } - ok = true - } - return ok -} - -// getVersion returns the Versions of the given member via its -// peerURLs. Returns the last error if it fails to get the version. 
-func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { - cc := &http.Client{ - Transport: rt, - } - var ( - err error - resp *http.Response - ) - - for _, u := range m.PeerURLs { - resp, err = cc.Get(u + "/version") - if err != nil { - plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - var b []byte - b, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - var vers version.Versions - if err = json.Unmarshal(b, &vers); err != nil { - plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - return &vers, nil - } - return nil, err -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go deleted file mode 100644 index ae8a4d08e355a..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "fmt" - "path/filepath" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/netutil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" -) - -// ServerConfig holds the configuration of etcd as taken from the command line or discovery. -type ServerConfig struct { - Name string - DiscoveryURL string - DiscoveryProxy string - ClientURLs types.URLs - PeerURLs types.URLs - DataDir string - // DedicatedWALDir config will make the etcd to write the WAL to the WALDir - // rather than the dataDir/member/wal. - DedicatedWALDir string - SnapCount uint64 - MaxSnapFiles uint - MaxWALFiles uint - InitialPeerURLsMap types.URLsMap - InitialClusterToken string - NewCluster bool - ForceNewCluster bool - PeerTLSInfo transport.TLSInfo - - TickMs uint - ElectionTicks int - BootstrapTimeout time.Duration - - AutoCompactionRetention int - QuotaBackendBytes int64 - - // MaxRequestBytes is the maximum request size to send over raft. - MaxRequestBytes uint - - StrictReconfigCheck bool - - // ClientCertAuthEnabled is true when cert has been signed by the client CA. - ClientCertAuthEnabled bool - - AuthToken string - - Debug bool -} - -// VerifyBootstrap sanity-checks the initial config for bootstrap case -// and returns an error for things that should never happen. 
-func (c *ServerConfig) VerifyBootstrap() error { - if err := c.hasLocalMember(); err != nil { - return err - } - if err := c.advertiseMatchesCluster(); err != nil { - return err - } - if checkDuplicateURL(c.InitialPeerURLsMap) { - return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) - } - if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" { - return fmt.Errorf("initial cluster unset and no discovery URL found") - } - return nil -} - -// VerifyJoinExisting sanity-checks the initial config for join existing cluster -// case and returns an error for things that should never happen. -func (c *ServerConfig) VerifyJoinExisting() error { - // The member has announced its peer urls to the cluster before starting; no need to - // set the configuration again. - if err := c.hasLocalMember(); err != nil { - return err - } - if checkDuplicateURL(c.InitialPeerURLsMap) { - return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) - } - if c.DiscoveryURL != "" { - return fmt.Errorf("discovery URL should not be set when joining existing initial cluster") - } - return nil -} - -// hasLocalMember checks that the cluster at least contains the local server. -func (c *ServerConfig) hasLocalMember() error { - if urls := c.InitialPeerURLsMap[c.Name]; urls == nil { - return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name) - } - return nil -} - -// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list. -func (c *ServerConfig) advertiseMatchesCluster() error { - urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice() - urls.Sort() - sort.Strings(apurls) - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) { - umap := map[string]types.URLs{c.Name: c.PeerURLs} - return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ",")) - } - return nil -} - -func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") } - -func (c *ServerConfig) WALDir() string { - if c.DedicatedWALDir != "" { - return c.DedicatedWALDir - } - return filepath.Join(c.MemberDir(), "wal") -} - -func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") } - -func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" } - -// ReqTimeout returns timeout for request to finish. 
-func (c *ServerConfig) ReqTimeout() time.Duration { - // 5s for queue waiting, computation and disk IO delay - // + 2 * election timeout for possible leader election - return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond -} - -func (c *ServerConfig) electionTimeout() time.Duration { - return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond -} - -func (c *ServerConfig) peerDialTimeout() time.Duration { - // 1s for queue wait and system delay - // + one RTT, which is smaller than 1/5 election timeout - return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5 -} - -func (c *ServerConfig) PrintWithInitial() { c.print(true) } - -func (c *ServerConfig) Print() { c.print(false) } - -func (c *ServerConfig) print(initial bool) { - plog.Infof("name = %s", c.Name) - if c.ForceNewCluster { - plog.Infof("force new cluster") - } - plog.Infof("data dir = %s", c.DataDir) - plog.Infof("member dir = %s", c.MemberDir()) - if c.DedicatedWALDir != "" { - plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir) - } - plog.Infof("heartbeat = %dms", c.TickMs) - plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs)) - plog.Infof("snapshot count = %d", c.SnapCount) - if len(c.DiscoveryURL) != 0 { - plog.Infof("discovery URL= %s", c.DiscoveryURL) - if len(c.DiscoveryProxy) != 0 { - plog.Infof("discovery proxy = %s", c.DiscoveryProxy) - } - } - plog.Infof("advertise client URLs = %s", c.ClientURLs) - if initial { - plog.Infof("initial advertise peer URLs = %s", c.PeerURLs) - plog.Infof("initial cluster = %s", c.InitialPeerURLsMap) - } -} - -func checkDuplicateURL(urlsmap types.URLsMap) bool { - um := make(map[string]bool) - for _, urls := range urlsmap { - for _, url := range urls { - u := url.String() - if um[u] { - return true - } - um[u] = true - } - } - return false -} - -func (c *ServerConfig) bootstrapTimeout() time.Duration { - if c.BootstrapTimeout != 0 { - return c.BootstrapTimeout - } - return time.Second -} - -func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go deleted file mode 100644 index d513f6708d333..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "sync/atomic" -) - -// consistentIndex represents the offset of an entry in a consistent replica log. -// It implements the mvcc.ConsistentIndexGetter interface. -// It is always set to the offset of current entry before executing the entry, -// so ConsistentWatchableKV could get the consistent index from it. 
-type consistentIndex uint64 - -func (i *consistentIndex) setConsistentIndex(v uint64) { - atomic.StoreUint64((*uint64)(i), v) -} - -func (i *consistentIndex) ConsistentIndex() uint64 { - return atomic.LoadUint64((*uint64)(i)) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/github.com/coreos/etcd/etcdserver/doc.go deleted file mode 100644 index b195d2d167a63..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package etcdserver defines how etcd servers interact and store their states. -package etcdserver diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go deleted file mode 100644 index ed749dbe8d880..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "errors" - "fmt" -) - -var ( - ErrUnknownMethod = errors.New("etcdserver: unknown method") - ErrStopped = errors.New("etcdserver: server stopped") - ErrCanceled = errors.New("etcdserver: request cancelled") - ErrTimeout = errors.New("etcdserver: request timed out") - ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") - ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") - ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") - ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") - ErrNoLeader = errors.New("etcdserver: no leader") - ErrRequestTooLarge = errors.New("etcdserver: request is too large") - ErrNoSpace = errors.New("etcdserver: no space") - ErrTooManyRequests = errors.New("etcdserver: too many requests") - ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") - ErrKeyNotFound = errors.New("etcdserver: key not found") -) - -type DiscoveryError struct { - Op string - Err error -} - -func (e DiscoveryError) Error() string { - return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go deleted file mode 100644 index 90bbd3632a6e6..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "time" - - "github.com/coreos/etcd/pkg/runtime" - "github.com/prometheus/client_golang/prometheus" -) - -var ( - hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "has_leader", - Help: "Whether or not a leader exists. 
1 is existence, 0 is not.", - }) - leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "leader_changes_seen_total", - Help: "The number of leader changes seen.", - }) - proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_committed_total", - Help: "The total number of consensus proposals committed.", - }) - proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_applied_total", - Help: "The total number of consensus proposals applied.", - }) - proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_pending", - Help: "The current number of pending proposals to commit.", - }) - proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_failed_total", - Help: "The total number of failed proposals seen.", - }) - leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "server", - Name: "lease_expired_total", - Help: "The total number of expired leases.", - }) -) - -func init() { - prometheus.MustRegister(hasLeader) - prometheus.MustRegister(leaderChanges) - prometheus.MustRegister(proposalsCommitted) - prometheus.MustRegister(proposalsApplied) - prometheus.MustRegister(proposalsPending) - prometheus.MustRegister(proposalsFailed) - prometheus.MustRegister(leaseExpired) -} - -func monitorFileDescriptor(done <-chan struct{}) { - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - for { - used, err := runtime.FDUsage() - if err != nil { - plog.Errorf("cannot monitor file descriptor usage (%v)", err) - return - } - limit, err := runtime.FDLimit() - if err != nil { - plog.Errorf("cannot monitor file descriptor usage (%v)", err) - return - } - if used >= limit/5*4 { - plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit) - } - select { - case <-ticker.C: - case <-done: - return - } - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go deleted file mode 100644 index 87126f1564c17..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -const ( - // DefaultQuotaBytes is the number of bytes the backend Size may - // consume before exceeding the space quota. - DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB - // MaxQuotaBytes is the maximum number of bytes suggested for a backend - // quota. A larger quota may lead to degraded performance. - MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB -) - -// Quota represents an arbitrary quota against arbitrary requests. 
Each request -// costs some charge; if there is not enough remaining charge, then there are -// too few resources available within the quota to apply the request. -type Quota interface { - // Available judges whether the given request fits within the quota. - Available(req interface{}) bool - // Cost computes the charge against the quota for a given request. - Cost(req interface{}) int - // Remaining is the amount of charge left for the quota. - Remaining() int64 -} - -type passthroughQuota struct{} - -func (*passthroughQuota) Available(interface{}) bool { return true } -func (*passthroughQuota) Cost(interface{}) int { return 0 } -func (*passthroughQuota) Remaining() int64 { return 1 } - -type backendQuota struct { - s *EtcdServer - maxBackendBytes int64 -} - -const ( - // leaseOverhead is an estimate for the cost of storing a lease - leaseOverhead = 64 - // kvOverhead is an estimate for the cost of storing a key's metadata - kvOverhead = 256 -) - -func NewBackendQuota(s *EtcdServer) Quota { - if s.Cfg.QuotaBackendBytes < 0 { - // disable quotas if negative - plog.Warningf("disabling backend quota") - return &passthroughQuota{} - } - if s.Cfg.QuotaBackendBytes == 0 { - // use default size if no quota size given - return &backendQuota{s, DefaultQuotaBytes} - } - if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) - } - return &backendQuota{s, s.Cfg.QuotaBackendBytes} -} - -func (b *backendQuota) Available(v interface{}) bool { - // TODO: maybe optimize backend.Size() - return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes -} - -func (b *backendQuota) Cost(v interface{}) int { - switch r := v.(type) { - case *pb.PutRequest: - return costPut(r) - case *pb.TxnRequest: - return costTxn(r) - case *pb.LeaseGrantRequest: - return leaseOverhead - default: - panic("unexpected cost") - } -} - -func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) } - -func costTxnReq(u *pb.RequestOp) int { - r := u.GetRequestPut() - if r == nil { - return 0 - } - return costPut(r) -} - -func costTxn(r *pb.TxnRequest) int { - sizeSuccess := 0 - for _, u := range r.Success { - sizeSuccess += costTxnReq(u) - } - sizeFailure := 0 - for _, u := range r.Failure { - sizeFailure += costTxnReq(u) - } - if sizeFailure > sizeSuccess { - return sizeFailure - } - return sizeSuccess -} - -func (b *backendQuota) Remaining() int64 { - return b.maxBackendBytes - b.s.Backend().Size() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go deleted file mode 100644 index dcb894f82fb18..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "encoding/json" - "expvar" - "sort" - "sync" - "sync/atomic" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/contention" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" - "github.com/coreos/pkg/capnslog" -) - -const ( - // Number of entries for slow follower to catch-up after compacting - // the raft storage entries. - // We expect the follower has a millisecond level latency with the leader. - // The max throughput is around 10K. Keep a 5K entries is enough for helping - // follower to catch up. - numberOfCatchUpEntries = 5000 - - // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value). - // Assuming the RTT is around 10ms, 1MB max size is large enough. - maxSizePerMsg = 1 * 1024 * 1024 - // Never overflow the rafthttp buffer, which is 4096. - // TODO: a better const? - maxInflightMsgs = 4096 / 8 -) - -var ( - // protects raftStatus - raftStatusMu sync.Mutex - // indirection for expvar func interface - // expvar panics when publishing duplicate name - // expvar does not support remove a registered name - // so only register a func that calls raftStatus - // and change raftStatus as we need. - raftStatus func() raft.Status -) - -func init() { - raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft")) - expvar.Publish("raft.status", expvar.Func(func() interface{} { - raftStatusMu.Lock() - defer raftStatusMu.Unlock() - return raftStatus() - })) -} - -type RaftTimer interface { - Index() uint64 - Term() uint64 -} - -// apply contains entries, snapshot to be applied. Once -// an apply is consumed, the entries will be persisted to -// to raft storage concurrently; the application must read -// raftDone before assuming the raft messages are stable. -type apply struct { - entries []raftpb.Entry - snapshot raftpb.Snapshot - // notifyc synchronizes etcd server applies with the raft node - notifyc chan struct{} -} - -type raftNode struct { - // Cache of the latest raft index and raft term the server has seen. - // These three unit64 fields must be the first elements to keep 64-bit - // alignment for atomic access to the fields. - index uint64 - term uint64 - lead uint64 - - raftNodeConfig - - // a chan to send/receive snapshot - msgSnapC chan raftpb.Message - - // a chan to send out apply - applyc chan apply - - // a chan to send out readState - readStateC chan raft.ReadState - - // utility - ticker *time.Ticker - // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - - stopped chan struct{} - done chan struct{} -} - -type raftNodeConfig struct { - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - raft.Node - raftStorage *raft.MemoryStorage - storage Storage - heartbeat time.Duration // for logging - // transport specifies the transport to send and receive msgs to members. - // Sending messages MUST NOT block. It is okay to drop messages, since - // clients should timeout and reissue their messages. - // If transport is nil, server will panic. - transport rafthttp.Transporter -} - -func newRaftNode(cfg raftNodeConfig) *raftNode { - r := &raftNode{ - raftNodeConfig: cfg, - // set up contention detectors for raft heartbeat message. 
- // expect to send a heartbeat within 2 heartbeat intervals. - td: contention.NewTimeoutDetector(2 * cfg.heartbeat), - readStateC: make(chan raft.ReadState, 1), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - applyc: make(chan apply), - stopped: make(chan struct{}), - done: make(chan struct{}), - } - if r.heartbeat == 0 { - r.ticker = &time.Ticker{} - } else { - r.ticker = time.NewTicker(r.heartbeat) - } - return r -} - -// start prepares and starts raftNode in a new goroutine. It is no longer safe -// to modify the fields after it has been started. -func (r *raftNode) start(rh *raftReadyHandler) { - internalTimeout := time.Second - - go func() { - defer r.onStop() - islead := false - - for { - select { - case <-r.ticker.C: - r.Tick() - case rd := <-r.Ready(): - if rd.SoftState != nil { - newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead - if newLeader { - leaderChanges.Inc() - } - - if rd.SoftState.Lead == raft.None { - hasLeader.Set(0) - } else { - hasLeader.Set(1) - } - - atomic.StoreUint64(&r.lead, rd.SoftState.Lead) - islead = rd.RaftState == raft.StateLeader - rh.updateLeadership(newLeader) - r.td.Reset() - } - - if len(rd.ReadStates) != 0 { - select { - case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: - case <-time.After(internalTimeout): - plog.Warningf("timed out sending read state") - case <-r.stopped: - return - } - } - - notifyc := make(chan struct{}, 1) - ap := apply{ - entries: rd.CommittedEntries, - snapshot: rd.Snapshot, - notifyc: notifyc, - } - - updateCommittedIndex(&ap, rh) - - select { - case r.applyc <- ap: - case <-r.stopped: - return - } - - // the leader can write to its disk in parallel with replicating to the followers and them - // writing to their disks. - // For more details, check raft thesis 10.2.1 - if islead { - // gofail: var raftBeforeLeaderSend struct{} - r.transport.Send(r.processMessages(rd.Messages)) - } - - // gofail: var raftBeforeSave struct{} - if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { - plog.Fatalf("raft save state and entries error: %v", err) - } - if !raft.IsEmptyHardState(rd.HardState) { - proposalsCommitted.Set(float64(rd.HardState.Commit)) - } - // gofail: var raftAfterSave struct{} - - if !raft.IsEmptySnap(rd.Snapshot) { - // gofail: var raftBeforeSaveSnap struct{} - if err := r.storage.SaveSnap(rd.Snapshot); err != nil { - plog.Fatalf("raft save snapshot error: %v", err) - } - // etcdserver now claim the snapshot has been persisted onto the disk - notifyc <- struct{}{} - - // gofail: var raftAfterSaveSnap struct{} - r.raftStorage.ApplySnapshot(rd.Snapshot) - plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) - // gofail: var raftAfterApplySnap struct{} - } - - r.raftStorage.Append(rd.Entries) - - if !islead { - // finish processing incoming messages before we signal raftdone chan - msgs := r.processMessages(rd.Messages) - - // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots - notifyc <- struct{}{} - - // Candidate or follower needs to wait for all pending configuration - // changes to be applied before sending messages. - // Otherwise we might incorrectly count votes (e.g. votes from removed members). - // Also slow machine's follower raft-layer could proceed to become the leader - // on its own single-node cluster, before apply-layer applies the config change. - // We simply wait for ALL pending entries to be applied for now. 
- // We might improve this later on if it causes unnecessary long blocking issues. - waitApply := false - for _, ent := range rd.CommittedEntries { - if ent.Type == raftpb.EntryConfChange { - waitApply = true - break - } - } - if waitApply { - // blocks until 'applyAll' calls 'applyWait.Trigger' - // to be in sync with scheduled config-change job - // (assume notifyc has cap of 1) - select { - case notifyc <- struct{}{}: - case <-r.stopped: - return - } - } - - // gofail: var raftBeforeFollowerSend struct{} - r.transport.Send(msgs) - } else { - // leader already processed 'MsgSnap' and signaled - notifyc <- struct{}{} - } - - r.Advance() - case <-r.stopped: - return - } - } - }() -} - -func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { - var ci uint64 - if len(ap.entries) != 0 { - ci = ap.entries[len(ap.entries)-1].Index - } - if ap.snapshot.Metadata.Index > ci { - ci = ap.snapshot.Metadata.Index - } - if ci != 0 { - rh.updateCommittedIndex(ci) - } -} - -func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { - sentAppResp := false - for i := len(ms) - 1; i >= 0; i-- { - if r.isIDRemoved(ms[i].To) { - ms[i].To = 0 - } - - if ms[i].Type == raftpb.MsgAppResp { - if sentAppResp { - ms[i].To = 0 - } else { - sentAppResp = true - } - } - - if ms[i].Type == raftpb.MsgSnap { - // There are two separate data store: the store for v2, and the KV for v3. - // The msgSnap only contains the most recent snapshot of store without KV. - // So we need to redirect the msgSnap to etcd server main loop for merging in the - // current store snapshot and KV snapshot. - select { - case r.msgSnapC <- ms[i]: - default: - // drop msgSnap if the inflight chan if full. - } - ms[i].To = 0 - } - if ms[i].Type == raftpb.MsgHeartbeat { - ok, exceed := r.td.Observe(ms[i].To) - if !ok { - // TODO: limit request rate. - plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed) - plog.Warningf("server is likely overloaded") - } - } - } - return ms -} - -func (r *raftNode) apply() chan apply { - return r.applyc -} - -func (r *raftNode) stop() { - r.stopped <- struct{}{} - <-r.done -} - -func (r *raftNode) onStop() { - r.Stop() - r.ticker.Stop() - r.transport.Stop() - if err := r.storage.Close(); err != nil { - plog.Panicf("raft close storage error: %v", err) - } - close(r.done) -} - -// for testing -func (r *raftNode) pauseSending() { - p := r.transport.(rafthttp.Pausable) - p.Pause() -} - -func (r *raftNode) resumeSending() { - p := r.transport.(rafthttp.Pausable) - p.Resume() -} - -// advanceTicksForElection advances ticks to the node for fast election. -// This reduces the time to wait for first leader election if bootstrapping the whole -// cluster, while leaving at least 1 heartbeat for possible existing leader -// to contact it. 
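// A small, self-contained sketch of the MsgAppResp de-duplication done by
// processMessages above: the batch is walked from newest to oldest and only
// the most recent append response keeps its destination, since older responses
// carry no extra information for the leader. The msg struct is a stand-in for
// raftpb.Message, not the vendored type.
package main

import "fmt"

type msg struct {
	Type string
	To   uint64
}

func dedupAppResp(ms []msg) []msg {
	sent := false
	for i := len(ms) - 1; i >= 0; i-- {
		if ms[i].Type == "MsgAppResp" {
			if sent {
				ms[i].To = 0 // To == 0 means "do not send"
			} else {
				sent = true
			}
		}
	}
	return ms
}

func main() {
	out := dedupAppResp([]msg{
		{Type: "MsgAppResp", To: 2},
		{Type: "MsgHeartbeat", To: 2},
		{Type: "MsgAppResp", To: 2},
	})
	fmt.Println(out) // only the last MsgAppResp keeps To = 2
}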
-func advanceTicksForElection(n raft.Node, electionTicks int) { - for i := 0; i < electionTicks-1; i++ { - n.Tick() - } -} - -func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { - var err error - member := cl.MemberByName(cfg.Name) - metadata := pbutil.MustMarshal( - &pb.Metadata{ - NodeID: uint64(member.ID), - ClusterID: uint64(cl.ID()), - }, - ) - if w, err = wal.Create(cfg.WALDir(), metadata); err != nil { - plog.Fatalf("create wal error: %v", err) - } - peers := make([]raft.Peer, len(ids)) - for i, id := range ids { - ctx, err := json.Marshal((*cl).Member(id)) - if err != nil { - plog.Panicf("marshal member should never fail: %v", err) - } - peers[i] = raft.Peer{ID: uint64(id), Context: ctx} - } - id = member.ID - plog.Infof("starting member %s in cluster %s", id, cl.ID()) - s = raft.NewMemoryStorage() - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - } - - n = raft.StartNode(c, peers) - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - advanceTicksForElection(n, c.ElectionTick) - return -} - -func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) - - plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - } - - n := raft.RestartNode(c) - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - advanceTicksForElection(n, c.ElectionTick) - return id, cl, n, s, w -} - -func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) - - // discard the previously uncommitted entries - for i, ent := range ents { - if ent.Index > st.Commit { - plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) - ents = ents[:i] - break - } - } - - // force append the configuration change entries - toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit) - ents = append(ents, toAppEnts...) 
- - // force commit newly appended entries - err := w.Save(raftpb.HardState{}, toAppEnts) - if err != nil { - plog.Fatalf("%v", err) - } - if len(ents) != 0 { - st.Commit = ents[len(ents)-1].Index - } - - plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - } - n := raft.RestartNode(c) - raftStatus = n.Status - return id, cl, n, s, w -} - -// getIDs returns an ordered set of IDs included in the given snapshot and -// the entries. The given snapshot/entries can contain two kinds of -// ID-related entry: -// - ConfChangeAddNode, in which case the contained ID will be added into the set. -// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. -func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { - ids := make(map[uint64]bool) - if snap != nil { - for _, id := range snap.Metadata.ConfState.Nodes { - ids[id] = true - } - } - for _, e := range ents { - if e.Type != raftpb.EntryConfChange { - continue - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - switch cc.Type { - case raftpb.ConfChangeAddNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeRemoveNode: - delete(ids, cc.NodeID) - case raftpb.ConfChangeUpdateNode: - // do nothing - default: - plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") - } - } - sids := make(types.Uint64Slice, 0, len(ids)) - for id := range ids { - sids = append(sids, id) - } - sort.Sort(sids) - return []uint64(sids) -} - -// createConfigChangeEnts creates a series of Raft entries (i.e. -// EntryConfChange) to remove the set of given IDs from the cluster. The ID -// `self` is _not_ removed, even if present in the set. -// If `self` is not inside the given ids, it creates a Raft entry to add a -// default member with the given `self`. 
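// A standalone sketch of the set folding performed by getIDs above: start from
// the node IDs recorded in the snapshot, replay add/remove configuration
// changes in log order, and return a sorted slice. confChange is a simplified
// stand-in for raftpb.ConfChange.
package main

import (
	"fmt"
	"sort"
)

type confChange struct {
	op     string // "add" or "remove"
	nodeID uint64
}

func memberIDs(snapIDs []uint64, changes []confChange) []uint64 {
	ids := make(map[uint64]bool)
	for _, id := range snapIDs {
		ids[id] = true
	}
	for _, cc := range changes {
		switch cc.op {
		case "add":
			ids[cc.nodeID] = true
		case "remove":
			delete(ids, cc.nodeID)
		}
	}
	out := make([]uint64, 0, len(ids))
	for id := range ids {
		out = append(out, id)
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}

func main() {
	ids := memberIDs([]uint64{1, 2, 3}, []confChange{{"remove", 2}, {"add", 4}})
	fmt.Println(ids) // [1 3 4]
}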
-func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry { - ents := make([]raftpb.Entry, 0) - next := index + 1 - found := false - for _, id := range ids { - if id == self { - found = true - continue - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - if !found { - m := membership.Member{ - ID: types.ID(self), - RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, - } - ctx, err := json.Marshal(m) - if err != nil { - plog.Panicf("marshal member should never fail: %v", err) - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: self, - Context: ctx, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - } - return ents -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go deleted file mode 100644 index 271c5e7731373..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ /dev/null @@ -1,1664 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "expvar" - "fmt" - "math" - "math/rand" - "net/http" - "os" - "path" - "regexp" - "sync" - "sync/atomic" - "time" - - "github.com/coreos/etcd/alarm" - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/compactor" - "github.com/coreos/etcd/discovery" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/idutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/runtime" - "github.com/coreos/etcd/pkg/schedule" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/pkg/wait" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/etcd/wal" - "github.com/coreos/go-semver/semver" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -const ( - DefaultSnapCount = 100000 - - StoreClusterPrefix = "/0" - StoreKeysPrefix = "/1" - - // HealthInterval is the minimum time the cluster should be healthy - // before accepting add member requests. - HealthInterval = 5 * time.Second - - purgeFileInterval = 30 * time.Second - // monitorVersionInterval should be smaller than the timeout - // on the connection. 
Or we will not be able to reuse the connection - // (since it will timeout). - monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second - - // max number of in-flight snapshot messages etcdserver allows to have - // This number is more than enough for most clusters with 5 machines. - maxInFlightMsgSnap = 16 - - releaseDelayAfterSnapshot = 30 * time.Second - - // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 - recommendedMaxRequestBytes = 10 * 1024 * 1024 -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver") - - storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")) -) - -func init() { - rand.Seed(time.Now().UnixNano()) - - expvar.Publish( - "file_descriptor_limit", - expvar.Func( - func() interface{} { - n, _ := runtime.FDLimit() - return n - }, - ), - ) -} - -type Response struct { - Event *store.Event - Watcher store.Watcher - err error -} - -type Server interface { - // Start performs any initialization of the Server necessary for it to - // begin serving requests. It must be called before Do or Process. - // Start must be non-blocking; any long-running server functionality - // should be implemented in goroutines. - Start() - // Stop terminates the Server and performs any necessary finalization. - // Do and Process cannot be called after Stop has been invoked. - Stop() - // ID returns the ID of the Server. - ID() types.ID - // Leader returns the ID of the leader Server. - Leader() types.ID - // Do takes a request and attempts to fulfill it, returning a Response. - Do(ctx context.Context, r pb.Request) (Response, error) - // Process takes a raft message and applies it to the server's raft state - // machine, respecting any timeout of the given context. - Process(ctx context.Context, m raftpb.Message) error - // AddMember attempts to add a member into the cluster. It will return - // ErrIDRemoved if member ID is removed from the cluster, or return - // ErrIDExists if member ID exists in the cluster. - AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) - // RemoveMember attempts to remove a member from the cluster. It will - // return ErrIDRemoved if member ID is removed from the cluster, or return - // ErrIDNotFound if member ID is not in the cluster. - RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) - - // UpdateMember attempts to update an existing member in the cluster. It will - // return ErrIDNotFound if the member ID does not exist. - UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) - - // ClusterVersion is the cluster-wide minimum major.minor version. - // Cluster version is set to the min version that an etcd member is - // compatible with when first bootstrap. - // - // ClusterVersion is nil until the cluster is bootstrapped (has a quorum). - // - // During a rolling upgrades, the ClusterVersion will be updated - // automatically after a sync. (5 second by default) - // - // The API/raft component can utilize ClusterVersion to determine if - // it can accept a client request or a raft RPC. - // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and - // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since - // this feature is introduced post 2.0. 
- ClusterVersion() *semver.Version -} - -// EtcdServer is the production implementation of the Server interface -type EtcdServer struct { - // inflightSnapshots holds count the number of snapshots currently inflight. - inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. - appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. - committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. - // consistIndex used to hold the offset of current executing entry - // It is initialized to 0 before executing any entry. - consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned. - Cfg *ServerConfig - - readych chan struct{} - r raftNode - - snapCount uint64 - - w wait.Wait - - readMu sync.RWMutex - // read routine notifies etcd server that it waits for reading by sending an empty struct to - // readwaitC - readwaitc chan struct{} - // readNotifier is used to notify the read routine that it can process the request - // when there is no error - readNotifier *notifier - - // stop signals the run goroutine should shutdown. - stop chan struct{} - // stopping is closed by run goroutine on shutdown. - stopping chan struct{} - // done is closed when all goroutines from start() complete. - done chan struct{} - - errorc chan error - id types.ID - attributes membership.Attributes - - cluster *membership.RaftCluster - - store store.Store - snapshotter *snap.Snapshotter - - applyV2 ApplierV2 - - // applyV3 is the applier with auth and quotas - applyV3 applierV3 - // applyV3Base is the core applier without auth or quotas - applyV3Base applierV3 - applyWait wait.WaitTime - - kv mvcc.ConsistentWatchableKV - lessor lease.Lessor - bemu sync.Mutex - be backend.Backend - authStore auth.AuthStore - alarmStore *alarm.AlarmStore - - stats *stats.ServerStats - lstats *stats.LeaderStats - - SyncTicker *time.Ticker - // compactor is used to auto-compact the KV. - compactor *compactor.Periodic - - // peerRt used to send requests (version, lease) to peers. - peerRt http.RoundTripper - reqIDGen *idutil.Generator - - // forceVersionC is used to force the version monitor loop - // to detect the cluster version immediately. - forceVersionC chan struct{} - - // wgMu blocks concurrent waitgroup mutation while server stopping - wgMu sync.RWMutex - // wg is used to wait for the go routines that depends on the server state - // to exit when stopping the server. - wg sync.WaitGroup - - // ctx is used for etcd-initiated requests that may need to be canceled - // on etcd server shutdown. - ctx context.Context - cancel context.CancelFunc - - leadTimeMu sync.RWMutex - leadElectedTime time.Time -} - -// NewServer creates a new EtcdServer from the supplied configuration. The -// configuration is considered static for the lifetime of the EtcdServer. 
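// A minimal illustration of the alignment convention called out in the struct
// comments above: on 32-bit platforms, sync/atomic's 64-bit operations require
// 8-byte alignment, and the first word of an allocated struct is guaranteed to
// be aligned, so the uint64 counters are kept at the top of the struct and
// touched only through the atomic package. The server type here is a toy, not
// the vendored EtcdServer.
package main

import (
	"fmt"
	"sync/atomic"
)

type server struct {
	appliedIndex   uint64 // first fields: 64-bit aligned even on 32-bit builds
	committedIndex uint64

	name string // non-atomic fields follow
}

func (s *server) setAppliedIndex(v uint64) { atomic.StoreUint64(&s.appliedIndex, v) }
func (s *server) getAppliedIndex() uint64  { return atomic.LoadUint64(&s.appliedIndex) }

func main() {
	s := &server{name: "toy"}
	s.setAppliedIndex(42)
	fmt.Println(s.getAppliedIndex()) // 42
}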
-func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { - st := store.New(StoreClusterPrefix, StoreKeysPrefix) - - var ( - w *wal.WAL - n raft.Node - s *raft.MemoryStorage - id types.ID - cl *membership.RaftCluster - ) - - if cfg.MaxRequestBytes > recommendedMaxRequestBytes { - plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) - } - - if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { - return nil, fmt.Errorf("cannot access data directory: %v", terr) - } - - haveWAL := wal.Exist(cfg.WALDir()) - - if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { - plog.Fatalf("create snapshot directory error: %v", err) - } - ss := snap.New(cfg.SnapDir()) - - bepath := cfg.backendPath() - beExist := fileutil.Exist(bepath) - be := openBackend(cfg) - - defer func() { - if err != nil { - be.Close() - } - }() - - prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout()) - if err != nil { - return nil, err - } - var ( - remotes []*membership.Member - snapshot *raftpb.Snapshot - ) - - switch { - case !haveWAL && !cfg.NewCluster: - if err = cfg.VerifyJoinExisting(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt) - if gerr != nil { - return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) - } - if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil { - return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) - } - if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) { - return nil, fmt.Errorf("incompatible with current running cluster") - } - - remotes = existingCluster.Members() - cl.SetID(existingCluster.ID()) - cl.SetStore(st) - cl.SetBackend(be) - cfg.Print() - id, n, s, w = startNode(cfg, cl, nil) - case !haveWAL && cfg.NewCluster: - if err = cfg.VerifyBootstrap(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - m := cl.MemberByName(cfg.Name) - if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) { - return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) - } - if cfg.ShouldDiscover() { - var str string - str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) - if err != nil { - return nil, &DiscoveryError{Op: "join", Err: err} - } - var urlsmap types.URLsMap - urlsmap, err = types.NewURLsMap(str) - if err != nil { - return nil, err - } - if checkDuplicateURL(urlsmap) { - return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) - } - if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil { - return nil, err - } - } - cl.SetStore(st) - cl.SetBackend(be) - cfg.PrintWithInitial() - id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) - case haveWAL: - if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { - return nil, fmt.Errorf("cannot write to member directory: %v", err) - } - - if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { - return nil, fmt.Errorf("cannot write to WAL directory: %v", err) - } - - if cfg.ShouldDiscover() { - plog.Warningf("discovery token ignored since a cluster has already been initialized. 
Valid log found at %q", cfg.WALDir()) - } - snapshot, err = ss.Load() - if err != nil && err != snap.ErrNoSnapshot { - return nil, err - } - if snapshot != nil { - if err = st.Recovery(snapshot.Data); err != nil { - plog.Panicf("recovered store from snapshot error: %v", err) - } - plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) - if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { - plog.Panicf("recovering backend from snapshot error: %v", err) - } - } - cfg.Print() - if !cfg.ForceNewCluster { - id, cl, n, s, w = restartNode(cfg, snapshot) - } else { - id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) - } - cl.SetStore(st) - cl.SetBackend(be) - cl.Recover(api.UpdateCapability) - if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist { - os.RemoveAll(bepath) - return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) - } - default: - return nil, fmt.Errorf("unsupported bootstrap config") - } - - if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil { - return nil, fmt.Errorf("cannot access member directory: %v", terr) - } - - sstats := stats.NewServerStats(cfg.Name, id.String()) - lstats := stats.NewLeaderStats(id.String()) - - heartbeat := time.Duration(cfg.TickMs) * time.Millisecond - srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: cfg, - snapCount: cfg.SnapCount, - errorc: make(chan error, 1), - store: st, - snapshotter: ss, - r: *newRaftNode( - raftNodeConfig{ - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - }, - ), - id: id, - attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, - cluster: cl, - stats: sstats, - lstats: lstats, - SyncTicker: time.NewTicker(500 * time.Millisecond), - peerRt: prt, - reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), - forceVersionC: make(chan struct{}), - } - - srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster} - - srv.be = be - minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat - - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. - // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. - srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds()))) - srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex) - if beExist { - kvindex := srv.kv.ConsistentIndex() - // TODO: remove kvindex != 0 checking when we do not expect users to upgrade - // etcd from pre-3.0 release. 
- if snapshot != nil && kvindex < snapshot.Metadata.Index { - if kvindex != 0 { - return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index) - } - plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) - } - } - newSrv := srv // since srv == nil in defer if srv is returned as nil - defer func() { - // closing backend without first closing kv can cause - // resumed compactions to fail with closed tx errors - if err != nil { - newSrv.kv.Close() - } - }() - - srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) - tp, err := auth.NewTokenProvider(cfg.AuthToken, - func(index uint64) <-chan struct{} { - return srv.applyWait.Wait(index) - }, - ) - if err != nil { - plog.Errorf("failed to create token provider: %s", err) - return nil, err - } - srv.authStore = auth.NewAuthStore(srv.be, tp) - if h := cfg.AutoCompactionRetention; h != 0 { - srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) - srv.compactor.Run() - } - - srv.applyV3Base = &applierV3backend{srv} - if err = srv.restoreAlarms(); err != nil { - return nil, err - } - - // TODO: move transport initialization near the definition of remote - tr := &rafthttp.Transport{ - TLSInfo: cfg.PeerTLSInfo, - DialTimeout: cfg.peerDialTimeout(), - ID: id, - URLs: cfg.PeerURLs, - ClusterID: cl.ID(), - Raft: srv, - Snapshotter: ss, - ServerStats: sstats, - LeaderStats: lstats, - ErrorC: srv.errorc, - } - if err = tr.Start(); err != nil { - return nil, err - } - // add all remotes into transport - for _, m := range remotes { - if m.ID != id { - tr.AddRemote(m.ID, m.PeerURLs) - } - } - for _, m := range cl.Members() { - if m.ID != id { - tr.AddPeer(m.ID, m.PeerURLs) - } - } - srv.r.transport = tr - - return srv, nil -} - -// Start prepares and starts server in a new goroutine. It is no longer safe to -// modify a server's fields after it has been sent to Start. -// It also starts a goroutine to publish its server information. -func (s *EtcdServer) Start() { - s.start() - s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) - s.goAttach(s.purgeFile) - s.goAttach(func() { monitorFileDescriptor(s.stopping) }) - s.goAttach(s.monitorVersions) - s.goAttach(s.linearizableReadLoop) -} - -// start prepares and starts server in a new goroutine. It is no longer safe to -// modify a server's fields after it has been sent to Start. -// This function is just used for testing. -func (s *EtcdServer) start() { - if s.snapCount == 0 { - plog.Infof("set snapshot count to default %d", DefaultSnapCount) - s.snapCount = DefaultSnapCount - } - s.w = wait.New() - s.applyWait = wait.NewTimeList() - s.done = make(chan struct{}) - s.stop = make(chan struct{}) - s.stopping = make(chan struct{}) - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.readwaitc = make(chan struct{}, 1) - s.readNotifier = newNotifier() - if s.ClusterVersion() != nil { - plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) - } else { - plog.Infof("starting server... 
[version: %v, cluster version: to_be_decided]", version.Version) - } - // TODO: if this is an empty log, writes all peer infos - // into the first entry - go s.run() -} - -func (s *EtcdServer) purgeFile() { - var serrc, werrc <-chan error - if s.Cfg.MaxSnapFiles > 0 { - serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) - } - if s.Cfg.MaxWALFiles > 0 { - werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) - } - select { - case e := <-werrc: - plog.Fatalf("failed to purge wal file %v", e) - case e := <-serrc: - plog.Fatalf("failed to purge snap file %v", e) - case <-s.stopping: - return - } -} - -func (s *EtcdServer) ID() types.ID { return s.id } - -func (s *EtcdServer) Cluster() *membership.RaftCluster { return s.cluster } - -func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() } - -func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor } - -func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } - -func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { - if s.cluster.IsIDRemoved(types.ID(m.From)) { - plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) - return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") - } - if m.Type == raftpb.MsgApp { - s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size()) - } - return s.r.Step(ctx, m) -} - -func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) } - -func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) } - -// ReportSnapshot reports snapshot sent status to the raft state machine, -// and clears the used snapshot from the snapshot store. -func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) { - s.r.ReportSnapshot(id, status) -} - -type etcdProgress struct { - confState raftpb.ConfState - snapi uint64 - appliedt uint64 - appliedi uint64 -} - -// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode, -// and helps decouple state machine logic from Raft algorithms. 
-// TODO: add a state machine interface to apply the commit entries and do snapshot/recover -type raftReadyHandler struct { - updateLeadership func(newLeader bool) - updateCommittedIndex func(uint64) -} - -func (s *EtcdServer) run() { - sn, err := s.r.raftStorage.Snapshot() - if err != nil { - plog.Panicf("get snapshot from raft storage error: %v", err) - } - - // asynchronously accept apply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler() - - var ( - smu sync.RWMutex - syncC <-chan time.Time - ) - setSyncC := func(ch <-chan time.Time) { - smu.Lock() - syncC = ch - smu.Unlock() - } - getSyncC := func() (ch <-chan time.Time) { - smu.RLock() - ch = syncC - smu.RUnlock() - return - } - rh := &raftReadyHandler{ - updateLeadership: func(newLeader bool) { - if !s.isLeader() { - if s.lessor != nil { - s.lessor.Demote() - } - if s.compactor != nil { - s.compactor.Pause() - } - setSyncC(nil) - } else { - if newLeader { - t := time.Now() - s.leadTimeMu.Lock() - s.leadElectedTime = t - s.leadTimeMu.Unlock() - } - setSyncC(s.SyncTicker.C) - if s.compactor != nil { - s.compactor.Resume() - } - } - - // TODO: remove the nil checking - // current test utility does not provide the stats - if s.stats != nil { - s.stats.BecomeLeader() - } - }, - updateCommittedIndex: func(ci uint64) { - cci := s.getCommittedIndex() - if ci > cci { - s.setCommittedIndex(ci) - } - }, - } - s.r.start(rh) - - ep := etcdProgress{ - confState: sn.Metadata.ConfState, - snapi: sn.Metadata.Index, - appliedt: sn.Metadata.Term, - appliedi: sn.Metadata.Index, - } - - defer func() { - s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping - close(s.stopping) - s.wgMu.Unlock() - s.cancel() - - sched.Stop() - - // wait for gouroutines before closing raft so wal stays open - s.wg.Wait() - - s.SyncTicker.Stop() - - // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines - // by adding a peer after raft stops the transport - s.r.stop() - - // kv, lessor and backend can be nil if running without v3 enabled - // or running unit tests. 
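// A self-contained sketch of the setSyncC/getSyncC pattern used in run()
// above: two closures share a mutex so the sync ticker channel can be swapped
// to nil when leadership is lost and restored when it is gained, while the main
// loop keeps selecting on whatever getSyncC returns. A nil channel simply
// blocks in select, which disables the sync path.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var (
		mu    sync.RWMutex
		syncC <-chan time.Time
	)
	setSyncC := func(ch <-chan time.Time) {
		mu.Lock()
		syncC = ch
		mu.Unlock()
	}
	getSyncC := func() <-chan time.Time {
		mu.RLock()
		defer mu.RUnlock()
		return syncC
	}

	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	setSyncC(ticker.C) // "became leader": enable periodic syncs
	select {
	case <-getSyncC():
		fmt.Println("sync fired while leader")
	case <-time.After(time.Second):
		fmt.Println("unexpected timeout")
	}

	setSyncC(nil) // "lost leadership": nil channel blocks forever in select
	select {
	case <-getSyncC():
		fmt.Println("unexpected sync while not leader")
	case <-time.After(50 * time.Millisecond):
		fmt.Println("no sync while not leader")
	}
}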
- if s.lessor != nil { - s.lessor.Stop() - } - if s.kv != nil { - s.kv.Close() - } - if s.authStore != nil { - s.authStore.Close() - } - if s.be != nil { - s.be.Close() - } - if s.compactor != nil { - s.compactor.Stop() - } - close(s.done) - }() - - var expiredLeaseC <-chan []*lease.Lease - if s.lessor != nil { - expiredLeaseC = s.lessor.ExpiredLeasesC() - } - - for { - select { - case ap := <-s.r.apply(): - f := func(context.Context) { s.applyAll(&ep, &ap) } - sched.Schedule(f) - case leases := <-expiredLeaseC: - s.goAttach(func() { - // Increases throughput of expired leases deletion process through parallelization - c := make(chan struct{}, maxPendingRevokes) - for _, lease := range leases { - select { - case c <- struct{}{}: - case <-s.stopping: - return - } - lid := lease.ID - s.goAttach(func() { - s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) - leaseExpired.Inc() - <-c - }) - } - }) - case err := <-s.errorc: - plog.Errorf("%s", err) - plog.Infof("the data-dir used by this member must be removed.") - return - case <-getSyncC(): - if s.store.HasTTLKeys() { - s.sync(s.Cfg.ReqTimeout()) - } - case <-s.stop: - return - } - } -} - -func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { - s.applySnapshot(ep, apply) - st := time.Now() - s.applyEntries(ep, apply) - d := time.Since(st) - entriesNum := len(apply.entries) - if entriesNum != 0 && d > time.Duration(entriesNum)*warnApplyDuration { - plog.Warningf("apply entries took too long [%v for %d entries]", d, len(apply.entries)) - plog.Warningf("avoid queries with large range/delete range!") - } - proposalsApplied.Set(float64(ep.appliedi)) - s.applyWait.Trigger(ep.appliedi) - // wait for the raft routine to finish the disk writes before triggering a - // snapshot. or applied index might be greater than the last index in raft - // storage, since the raft routine might be slower than apply routine. - <-apply.notifyc - - s.triggerSnapshot(ep) - select { - // snapshot requested via send() - case m := <-s.r.msgSnapC: - merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState) - s.sendMergedSnap(merged) - default: - } -} - -func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { - if raft.IsEmptySnap(apply.snapshot) { - return - } - - plog.Infof("applying snapshot at index %d...", ep.snapi) - defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) - - if apply.snapshot.Metadata.Index <= ep.appliedi { - plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", - apply.snapshot.Metadata.Index, ep.appliedi) - } - - // wait for raftNode to persist snapshot onto the disk - <-apply.notifyc - - newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) - if err != nil { - plog.Panic(err) - } - - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. - // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. - if s.lessor != nil { - plog.Info("recovering lessor...") - s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) - plog.Info("finished recovering lessor") - } - - plog.Info("restoring mvcc store...") - - if err := s.kv.Restore(newbe); err != nil { - plog.Panicf("restore KV error: %v", err) - } - s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) - - plog.Info("finished restoring mvcc store") - - // Closing old backend might block until all the txns - // on the backend are finished. - // We do not want to wait on closing the old backend. 
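// A standalone sketch of the bounded-parallelism idiom used for expired-lease
// revocation above: a buffered channel acts as a semaphore so at most
// maxPending revocations run at once. The sleep is a placeholder for the
// LeaseRevoke request issued by the vendored code; maxPending mirrors
// maxPendingRevokes.
package main

import (
	"fmt"
	"sync"
	"time"
)

const maxPending = 4

func main() {
	leases := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}

	sem := make(chan struct{}, maxPending)
	var wg sync.WaitGroup
	for _, id := range leases {
		sem <- struct{}{} // blocks once maxPending revocations are in flight
		wg.Add(1)
		go func(id int64) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			time.Sleep(5 * time.Millisecond) // stand-in for the revoke request
			fmt.Println("revoked lease", id)
		}(id)
	}
	wg.Wait()
}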
- s.bemu.Lock() - oldbe := s.be - go func() { - plog.Info("closing old backend...") - defer plog.Info("finished closing old backend") - - if err := oldbe.Close(); err != nil { - plog.Panicf("close backend error: %v", err) - } - }() - - s.be = newbe - s.bemu.Unlock() - - plog.Info("recovering alarms...") - if err := s.restoreAlarms(); err != nil { - plog.Panicf("restore alarms error: %v", err) - } - plog.Info("finished recovering alarms") - - if s.authStore != nil { - plog.Info("recovering auth store...") - s.authStore.Recover(newbe) - plog.Info("finished recovering auth store") - } - - plog.Info("recovering store v2...") - if err := s.store.Recovery(apply.snapshot.Data); err != nil { - plog.Panicf("recovery store error: %v", err) - } - plog.Info("finished recovering store v2") - - s.cluster.SetBackend(s.be) - plog.Info("recovering cluster configuration...") - s.cluster.Recover(api.UpdateCapability) - plog.Info("finished recovering cluster configuration") - - plog.Info("removing old peers from network...") - // recover raft transport - s.r.transport.RemoveAllPeers() - plog.Info("finished removing old peers from network") - - plog.Info("adding peers from new cluster configuration into network...") - for _, m := range s.cluster.Members() { - if m.ID == s.ID() { - continue - } - s.r.transport.AddPeer(m.ID, m.PeerURLs) - } - plog.Info("finished adding peers from new cluster configuration into network...") - - ep.appliedt = apply.snapshot.Metadata.Term - ep.appliedi = apply.snapshot.Metadata.Index - ep.snapi = ep.appliedi - ep.confState = apply.snapshot.Metadata.ConfState -} - -func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { - if len(apply.entries) == 0 { - return - } - firsti := apply.entries[0].Index - if firsti > ep.appliedi+1 { - plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) - } - var ents []raftpb.Entry - if ep.appliedi+1-firsti < uint64(len(apply.entries)) { - ents = apply.entries[ep.appliedi+1-firsti:] - } - if len(ents) == 0 { - return - } - var shouldstop bool - if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop { - go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster")) - } -} - -func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { - if ep.appliedi-ep.snapi <= s.snapCount { - return - } - - plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) - s.snapshot(ep.appliedi, ep.confState) - ep.snapi = ep.appliedi -} - -func (s *EtcdServer) isMultiNode() bool { - return s.cluster != nil && len(s.cluster.MemberIDs()) > 1 -} - -func (s *EtcdServer) isLeader() bool { - return uint64(s.ID()) == s.Lead() -} - -// transferLeadership transfers the leader to the given transferee. -// TODO: maybe expose to client? 
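// A small sketch of the window arithmetic in applyEntries above: given the
// index already applied and a batch of committed entries, drop the prefix that
// was applied before (for example, replayed after a restart) and keep only the
// entries starting at appliedIndex+1. The entry type is a stand-in for
// raftpb.Entry.
package main

import "fmt"

type entry struct{ Index uint64 }

func pending(applied uint64, ents []entry) []entry {
	if len(ents) == 0 {
		return nil
	}
	first := ents[0].Index
	if first > applied+1 {
		// a gap would mean lost entries; the vendored code panics here
		panic("first committed entry is ahead of applied index + 1")
	}
	if applied+1-first < uint64(len(ents)) {
		return ents[applied+1-first:]
	}
	return nil // everything in the batch was applied already
}

func main() {
	ents := []entry{{Index: 8}, {Index: 9}, {Index: 10}}
	fmt.Println(pending(9, ents))  // [{10}]
	fmt.Println(pending(10, ents)) // []
}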
-func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error { - now := time.Now() - interval := time.Duration(s.Cfg.TickMs) * time.Millisecond - - plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) - s.r.TransferLeadership(ctx, lead, transferee) - for s.Lead() != transferee { - select { - case <-ctx.Done(): // time out - return ErrTimeoutLeaderTransfer - case <-time.After(interval): - } - } - - // TODO: drain all requests, or drop all messages to the old leader - - plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) - return nil -} - -// TransferLeadership transfers the leader to the chosen transferee. -func (s *EtcdServer) TransferLeadership() error { - if !s.isLeader() { - plog.Printf("skipped leadership transfer for stopping non-leader member") - return nil - } - - if !s.isMultiNode() { - plog.Printf("skipped leadership transfer for single member cluster") - return nil - } - - transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs()) - if !ok { - return ErrUnhealthy - } - - tm := s.Cfg.ReqTimeout() - ctx, cancel := context.WithTimeout(s.ctx, tm) - err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) - cancel() - return err -} - -// HardStop stops the server without coordination with other members in the cluster. -func (s *EtcdServer) HardStop() { - select { - case s.stop <- struct{}{}: - case <-s.done: - return - } - <-s.done -} - -// Stop stops the server gracefully, and shuts down the running goroutine. -// Stop should be called after a Start(s), otherwise it will block forever. -// When stopping leader, Stop transfers its leadership to one of its peers -// before stopping the server. -func (s *EtcdServer) Stop() { - if err := s.TransferLeadership(); err != nil { - plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) - } - s.HardStop() -} - -// ReadyNotify returns a channel that will be closed when the server -// is ready to serve client requests -func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych } - -func (s *EtcdServer) stopWithDelay(d time.Duration, err error) { - select { - case <-time.After(d): - case <-s.done: - } - select { - case s.errorc <- err: - default: - } -} - -// StopNotify returns a channel that receives a empty struct -// when the server is stopped. -func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } - -func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } - -func (s *EtcdServer) LeaderStats() []byte { - lead := atomic.LoadUint64(&s.r.lead) - if lead != uint64(s.id) { - return nil - } - return s.lstats.JSON() -} - -func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } - -func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { - if s.authStore == nil { - // In the context of ordinary etcd process, s.authStore will never be nil. - // This branch is for handling cases in server_test.go - return nil - } - - // Note that this permission check is done in the API layer, - // so TOCTOU problem can be caused potentially in a schedule like this: - // update membership with user A -> revoke root role of A -> apply membership change - // in the state machine layer - // However, both of membership change and role management requires the root privilege. - // So careful operation by admins can prevent the problem. 
- authInfo, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return s.AuthStore().IsAdminPermitted(authInfo) -} - -func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - - if s.Cfg.StrictReconfigCheck { - // by default StrictReconfigCheck is enabled; reject new members if unhealthy - if !s.cluster.IsReadyToAddNewMember() { - plog.Warningf("not enough started members, rejecting member add %+v", memb) - return nil, ErrNotEnoughStartedMembers - } - if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { - plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) - return nil, ErrUnhealthy - } - } - - // TODO: move Member to protobuf type - b, err := json.Marshal(memb) - if err != nil { - return nil, err - } - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: uint64(memb.ID), - Context: b, - } - return s.configure(ctx, cc) -} - -func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - - // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss - if err := s.mayRemoveMember(types.ID(id)); err != nil { - return nil, err - } - - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - return s.configure(ctx, cc) -} - -func (s *EtcdServer) mayRemoveMember(id types.ID) error { - if !s.Cfg.StrictReconfigCheck { - return nil - } - - if !s.cluster.IsReadyToRemoveMember(uint64(id)) { - plog.Warningf("not enough started members, rejecting remove member %s", id) - return ErrNotEnoughStartedMembers - } - - // downed member is safe to remove since it's not part of the active quorum - if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() { - return nil - } - - // protect quorum if some members are down - m := s.cluster.Members() - active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) - if (active - 1) < 1+((len(m)-1)/2) { - plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) - return ErrUnhealthy - } - - return nil -} - -func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - b, merr := json.Marshal(memb) - if merr != nil { - return nil, merr - } - - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeUpdateNode, - NodeID: uint64(memb.ID), - Context: b, - } - return s.configure(ctx, cc) -} - -// Implement the RaftTimer interface - -func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) } - -func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) } - -// Lead is only for testing purposes. -// TODO: add Raft server interface to expose raft related info: -// Index, Term, Lead, Committed, Applied, LastIndex, etc. -func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } - -func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } - -type confChangeResponse struct { - membs []*membership.Member - err error -} - -// configure sends a configuration change through consensus and -// then waits for it to be applied to the server. It -// will block until the change is performed or there is an error. 
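// A standalone sketch of the quorum arithmetic used by mayRemoveMember above:
// removing a member is rejected when the members that would remain reachable
// could no longer form a majority of the shrunken cluster. Plain ints stand in
// for the transport and membership types.
package main

import "fmt"

// safeToRemove reports whether removing one currently-active member still
// leaves a majority of the remaining cluster reachable.
func safeToRemove(clusterSize, active int) bool {
	remainingActive := active - 1        // the removed member no longer counts
	quorumAfter := 1 + (clusterSize-1)/2 // majority of the cluster minus the removed member
	return remainingActive >= quorumAfter
}

func main() {
	fmt.Println(safeToRemove(5, 5)) // true: 4 remaining active >= quorum of 3
	fmt.Println(safeToRemove(5, 3)) // false: 2 remaining active < quorum of 3
	fmt.Println(safeToRemove(3, 3)) // true: 2 >= 2
	fmt.Println(safeToRemove(3, 2)) // false: 1 < 2
}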
-func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { - cc.ID = s.reqIDGen.Next() - ch := s.w.Register(cc.ID) - start := time.Now() - if err := s.r.ProposeConfChange(ctx, cc); err != nil { - s.w.Trigger(cc.ID, nil) - return nil, err - } - select { - case x := <-ch: - if x == nil { - plog.Panicf("configure trigger value should never be nil") - } - resp := x.(*confChangeResponse) - return resp.membs, resp.err - case <-ctx.Done(): - s.w.Trigger(cc.ID, nil) // GC wait - return nil, s.parseProposeCtxErr(ctx.Err(), start) - case <-s.stopping: - return nil, ErrStopped - } -} - -// sync proposes a SYNC request and is non-blocking. -// This makes no guarantee that the request will be proposed or performed. -// The request will be canceled after the given timeout. -func (s *EtcdServer) sync(timeout time.Duration) { - req := pb.Request{ - Method: "SYNC", - ID: s.reqIDGen.Next(), - Time: time.Now().UnixNano(), - } - data := pbutil.MustMarshal(&req) - // There is no promise that node has leader when do SYNC request, - // so it uses goroutine to propose. - ctx, cancel := context.WithTimeout(s.ctx, timeout) - s.goAttach(func() { - s.r.Propose(ctx, data) - cancel() - }) -} - -// publish registers server information into the cluster. The information -// is the JSON representation of this server's member struct, updated with the -// static clientURLs of the server. -// The function keeps attempting to register until it succeeds, -// or its server is stopped. -func (s *EtcdServer) publish(timeout time.Duration) { - b, err := json.Marshal(s.attributes) - if err != nil { - plog.Panicf("json marshal error: %v", err) - return - } - req := pb.Request{ - Method: "PUT", - Path: membership.MemberAttributesStorePath(s.id), - Val: string(b), - } - - for { - ctx, cancel := context.WithTimeout(s.ctx, timeout) - _, err := s.Do(ctx, req) - cancel() - switch err { - case nil: - close(s.readych) - plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) - return - case ErrStopped: - plog.Infof("aborting publish because server is stopped") - return - default: - plog.Errorf("publish error: %v", err) - } - } -} - -func (s *EtcdServer) sendMergedSnap(merged snap.Message) { - atomic.AddInt64(&s.inflightSnapshots, 1) - - s.r.transport.SendSnapshot(merged) - s.goAttach(func() { - select { - case ok := <-merged.CloseNotify(): - // delay releasing inflight snapshot for another 30 seconds to - // block log compaction. - // If the follower still fails to catch up, it is probably just too slow - // to catch up. We cannot avoid the snapshot cycle anyway. - if ok { - select { - case <-time.After(releaseDelayAfterSnapshot): - case <-s.stopping: - } - } - atomic.AddInt64(&s.inflightSnapshots, -1) - case <-s.stopping: - return - } - }) -} - -// apply takes entries received from Raft (after it has been committed) and -// applies them to the current state of the EtcdServer. -// The given entries should not be empty. 
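// A minimal sketch of the register/trigger pattern that configure() relies on
// above: the proposer registers a channel under the proposal ID before handing
// the proposal to raft, and the apply loop later triggers that ID with the
// result, unblocking the waiting caller. This toy registry only mirrors the
// idea behind the pkg/wait package; it is not the vendored implementation.
package main

import (
	"fmt"
	"sync"
)

type waitRegistry struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaitRegistry() *waitRegistry {
	return &waitRegistry{m: make(map[uint64]chan interface{})}
}

// Register returns a channel that receives the value passed to Trigger(id, ...).
func (w *waitRegistry) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

// Trigger delivers the result for id and forgets the registration.
func (w *waitRegistry) Trigger(id uint64, v interface{}) {
	w.mu.Lock()
	ch, ok := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ok {
		ch <- v
		close(ch)
	}
}

func main() {
	w := newWaitRegistry()
	ch := w.Register(42)                    // proposer side, before proposing
	go w.Trigger(42, "conf change applied") // apply-loop side, after commit
	fmt.Println(<-ch)
}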
-func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) { - for i := range es { - e := es[i] - switch e.Type { - case raftpb.EntryNormal: - s.applyEntryNormal(&e) - case raftpb.EntryConfChange: - // set the consistent index of current executing entry - if e.Index > s.consistIndex.ConsistentIndex() { - s.consistIndex.setConsistentIndex(e.Index) - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - removedSelf, err := s.applyConfChange(cc, confState) - s.setAppliedIndex(e.Index) - shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) - default: - plog.Panicf("entry type should be either EntryNormal or EntryConfChange") - } - atomic.StoreUint64(&s.r.index, e.Index) - atomic.StoreUint64(&s.r.term, e.Term) - appliedt = e.Term - appliedi = e.Index - } - return appliedt, appliedi, shouldStop -} - -// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer -func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { - shouldApplyV3 := false - if e.Index > s.consistIndex.ConsistentIndex() { - // set the consistent index of current executing entry - s.consistIndex.setConsistentIndex(e.Index) - shouldApplyV3 = true - } - defer s.setAppliedIndex(e.Index) - - // raft state machine may generate noop entry when leader confirmation. - // skip it in advance to avoid some potential bug in the future - if len(e.Data) == 0 { - select { - case s.forceVersionC <- struct{}{}: - default: - } - // promote lessor when the local member is leader and finished - // applying all entries from the last term. - if s.isLeader() { - s.lessor.Promote(s.Cfg.electionTimeout()) - } - return - } - - var raftReq pb.InternalRaftRequest - if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible - var r pb.Request - pbutil.MustUnmarshal(&r, e.Data) - s.w.Trigger(r.ID, s.applyV2Request(&r)) - return - } - if raftReq.V2 != nil { - req := raftReq.V2 - s.w.Trigger(req.ID, s.applyV2Request(req)) - return - } - - // do not re-apply applied entries. - if !shouldApplyV3 { - return - } - - id := raftReq.ID - if id == 0 { - id = raftReq.Header.ID - } - - var ar *applyResult - needResult := s.w.IsRegistered(id) - if needResult || !noSideEffect(&raftReq) { - if !needResult && raftReq.Txn != nil { - removeNeedlessRangeReqs(raftReq.Txn) - } - ar = s.applyV3.Apply(&raftReq) - } - - if ar == nil { - return - } - - if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { - s.w.Trigger(id, ar) - return - } - - plog.Errorf("applying raft message exceeded backend quota") - s.goAttach(func() { - a := &pb.AlarmRequest{ - MemberID: uint64(s.ID()), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) - s.w.Trigger(id, ar) - }) -} - -// applyConfChange applies a ConfChange to the server. 
It is only -// invoked with a ConfChange that has already passed through Raft -func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) { - if err := s.cluster.ValidateConfigurationChange(cc); err != nil { - cc.NodeID = raft.None - s.r.ApplyConfChange(cc) - return false, err - } - *confState = *s.r.ApplyConfChange(cc) - switch cc.Type { - case raftpb.ConfChangeAddNode: - m := new(membership.Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") - } - s.cluster.AddMember(m) - if m.ID != s.id { - s.r.transport.AddPeer(m.ID, m.PeerURLs) - } - case raftpb.ConfChangeRemoveNode: - id := types.ID(cc.NodeID) - s.cluster.RemoveMember(id) - if id == s.id { - return true, nil - } - s.r.transport.RemovePeer(id) - case raftpb.ConfChangeUpdateNode: - m := new(membership.Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") - } - s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes) - if m.ID != s.id { - s.r.transport.UpdatePeer(m.ID, m.PeerURLs) - } - } - return false, nil -} - -// TODO: non-blocking snapshot -func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { - clone := s.store.Clone() - // commit kv to write metadata (for example: consistent index) to disk. - // KV().commit() updates the consistent index in backend. - // All operations that update consistent index must be called sequentially - // from applyAll function. - // So KV().Commit() cannot run in parallel with apply. It has to be called outside - // the go routine created below. - s.KV().Commit() - - s.goAttach(func() { - d, err := clone.SaveNoCopy() - // TODO: current store will never fail to do a snapshot - // what should we do if the store might fail? - if err != nil { - plog.Panicf("store save should never fail: %v", err) - } - snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) - if err != nil { - // the snapshot was done asynchronously with the progress of raft. - // raft might have already got a newer snapshot. - if err == raft.ErrSnapOutOfDate { - return - } - plog.Panicf("unexpected create snapshot error %v", err) - } - // SaveSnap saves the snapshot and releases the locked wal files - // to the snapshot index. - if err = s.r.storage.SaveSnap(snap); err != nil { - plog.Fatalf("save snapshot error: %v", err) - } - plog.Infof("saved snapshot at index %d", snap.Metadata.Index) - - // When sending a snapshot, etcd will pause compaction. - // After receives a snapshot, the slow follower needs to get all the entries right after - // the snapshot sent to catch up. If we do not pause compaction, the log entries right after - // the snapshot sent might already be compacted. It happens when the snapshot takes long time - // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. - if atomic.LoadInt64(&s.inflightSnapshots) != 0 { - plog.Infof("skip compaction since there is an inflight snapshot") - return - } - - // keep some in memory log entries for slow followers. - compacti := uint64(1) - if snapi > numberOfCatchUpEntries { - compacti = snapi - numberOfCatchUpEntries - } - err = s.r.raftStorage.Compact(compacti) - if err != nil { - // the compaction was done asynchronously with the progress of raft. 
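// A tiny sketch of the compaction arithmetic in snapshot() above: after
// snapshotting at index snapi, the in-memory raft log is compacted up to
// snapi minus a catch-up window, so slow followers still have recent entries
// to fetch before a full snapshot becomes necessary. catchUpEntries mirrors
// numberOfCatchUpEntries.
package main

import "fmt"

const catchUpEntries = 5000

func compactIndex(snapi uint64) uint64 {
	if snapi > catchUpEntries {
		return snapi - catchUpEntries
	}
	return 1 // keep everything while the log is still short
}

func main() {
	fmt.Println(compactIndex(100))    // 1
	fmt.Println(compactIndex(123456)) // 118456
}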
- // raft log might already been compact. - if err == raft.ErrCompacted { - return - } - plog.Panicf("unexpected compaction error %v", err) - } - plog.Infof("compacted raft log at %d", compacti) - }) -} - -// CutPeer drops messages to the specified peer. -func (s *EtcdServer) CutPeer(id types.ID) { - tr, ok := s.r.transport.(*rafthttp.Transport) - if ok { - tr.CutPeer(id) - } -} - -// MendPeer recovers the message dropping behavior of the given peer. -func (s *EtcdServer) MendPeer(id types.ID) { - tr, ok := s.r.transport.(*rafthttp.Transport) - if ok { - tr.MendPeer(id) - } -} - -func (s *EtcdServer) PauseSending() { s.r.pauseSending() } - -func (s *EtcdServer) ResumeSending() { s.r.resumeSending() } - -func (s *EtcdServer) ClusterVersion() *semver.Version { - if s.cluster == nil { - return nil - } - return s.cluster.Version() -} - -// monitorVersions checks the member's version every monitorVersionInterval. -// It updates the cluster version if all members agrees on a higher one. -// It prints out log if there is a member with a higher version than the -// local version. -func (s *EtcdServer) monitorVersions() { - for { - select { - case <-s.forceVersionC: - case <-time.After(monitorVersionInterval): - case <-s.stopping: - return - } - - if s.Leader() != s.ID() { - continue - } - - v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt)) - if v != nil { - // only keep major.minor version for comparison - v = &semver.Version{ - Major: v.Major, - Minor: v.Minor, - } - } - - // if the current version is nil: - // 1. use the decided version if possible - // 2. or use the min cluster version - if s.cluster.Version() == nil { - verStr := version.MinClusterVersion - if v != nil { - verStr = v.String() - } - s.goAttach(func() { s.updateClusterVersion(verStr) }) - continue - } - - // update cluster version only if the decided version is greater than - // the current cluster version - if v != nil && s.cluster.Version().LessThan(*v) { - s.goAttach(func() { s.updateClusterVersion(v.String()) }) - } - } -} - -func (s *EtcdServer) updateClusterVersion(ver string) { - if s.cluster.Version() == nil { - plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) - } else { - plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) - } - req := pb.Request{ - Method: "PUT", - Path: membership.StoreClusterVersionKey(), - Val: ver, - } - ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) - _, err := s.Do(ctx, req) - cancel() - switch err { - case nil: - return - case ErrStopped: - plog.Infof("aborting update cluster version because server is stopped") - return - default: - plog.Errorf("error updating cluster version (%v)", err) - } -} - -func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { - switch err { - case context.Canceled: - return ErrCanceled - case context.DeadlineExceeded: - s.leadTimeMu.RLock() - curLeadElected := s.leadElectedTime - s.leadTimeMu.RUnlock() - prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) - if start.After(prevLeadLost) && start.Before(curLeadElected) { - return ErrTimeoutDueToLeaderFail - } - - lead := types.ID(atomic.LoadUint64(&s.r.lead)) - switch lead { - case types.ID(raft.None): - // TODO: return error to specify it happens because the cluster does not have leader now - case s.ID(): - if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { - 
return ErrTimeoutDueToConnectionLost - } - default: - if !isConnectedSince(s.r.transport, start, lead) { - return ErrTimeoutDueToConnectionLost - } - } - - return ErrTimeout - default: - return err - } -} - -func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv } -func (s *EtcdServer) Backend() backend.Backend { - s.bemu.Lock() - defer s.bemu.Unlock() - return s.be -} - -func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } - -func (s *EtcdServer) restoreAlarms() error { - s.applyV3 = s.newApplierV3() - as, err := alarm.NewAlarmStore(s) - if err != nil { - return err - } - s.alarmStore = as - if len(as.Get(pb.AlarmType_NOSPACE)) > 0 { - s.applyV3 = newApplierV3Capped(s.applyV3) - } - return nil -} - -func (s *EtcdServer) getAppliedIndex() uint64 { - return atomic.LoadUint64(&s.appliedIndex) -} - -func (s *EtcdServer) setAppliedIndex(v uint64) { - atomic.StoreUint64(&s.appliedIndex, v) -} - -func (s *EtcdServer) getCommittedIndex() uint64 { - return atomic.LoadUint64(&s.committedIndex) -} - -func (s *EtcdServer) setCommittedIndex(v uint64) { - atomic.StoreUint64(&s.committedIndex, v) -} - -// goAttach creates a goroutine on a given function and tracks it using -// the etcdserver waitgroup. -func (s *EtcdServer) goAttach(f func()) { - s.wgMu.RLock() // this blocks with ongoing close(s.stopping) - defer s.wgMu.RUnlock() - select { - case <-s.stopping: - plog.Warning("server has stopped (skipping goAttach)") - return - default: - } - - // now safe to add since waitgroup wait has not started yet - s.wg.Add(1) - go func() { - defer s.wg.Done() - f() - }() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go deleted file mode 100644 index 928aa95b6b162..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "io" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" -) - -// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf), -// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message -// as ReadCloser. -func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message { - // get a snapshot of v2 store as []byte - clone := s.store.Clone() - d, err := clone.SaveNoCopy() - if err != nil { - plog.Panicf("store save should never fail: %v", err) - } - - // commit kv to write metadata(for example: consistent index). - s.KV().Commit() - dbsnap := s.be.Snapshot() - // get a snapshot of v3 KV as readCloser - rc := newSnapshotReaderCloser(dbsnap) - - // put the []byte snapshot of store into raft snapshot and return the merged snapshot with - // KV readCloser snapshot. 
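The removed parseProposeCtxErr above classifies a deadline-exceeded proposal as "timeout due to leader failure" when the proposal started inside the window running from two election timeouts before the current leader was elected up to the election itself. A simplified, dependency-free sketch of that window check (tick values and names here are illustrative, not etcd's configuration):

package main

import (
	"fmt"
	"time"
)

// startedDuringLeaderFailure reports whether a proposal that started at
// "start" falls inside the window in which the previous leader was likely
// lost: from two election timeouts before the current leader was elected
// up to the election itself.
func startedDuringLeaderFailure(start, leadElected time.Time, electionTicks int, tickMs uint) bool {
	electionTimeout := time.Duration(electionTicks) * time.Duration(tickMs) * time.Millisecond
	prevLeadLost := leadElected.Add(-2 * electionTimeout)
	return start.After(prevLeadLost) && start.Before(leadElected)
}

func main() {
	leadElected := time.Now()
	start := leadElected.Add(-500 * time.Millisecond) // proposal began just before the election
	fmt.Println(startedDuringLeaderFailure(start, leadElected, 10, 100)) // true
}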
- snapshot := raftpb.Snapshot{ - Metadata: raftpb.SnapshotMetadata{ - Index: snapi, - Term: snapt, - ConfState: confState, - }, - Data: d, - } - m.Snapshot = snapshot - - return *snap.NewMessage(m, rc, dbsnap.Size()) -} - -func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { - pr, pw := io.Pipe() - go func() { - n, err := snapshot.WriteTo(pw) - if err == nil { - plog.Infof("wrote database snapshot out [total bytes: %d]", n) - } else { - plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) - } - pw.CloseWithError(err) - err = snapshot.Close() - if err != nil { - plog.Panicf("failed to close database snapshot: %v", err) - } - }() - return pr -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go deleted file mode 100644 index aa8f87569dbea..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "io" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" -) - -type Storage interface { - // Save function saves ents and state to the underlying stable storage. - // Save MUST block until st and ents are on stable storage. - Save(st raftpb.HardState, ents []raftpb.Entry) error - // SaveSnap function saves snapshot to the underlying stable storage. - SaveSnap(snap raftpb.Snapshot) error - // Close closes the Storage and performs finalization. - Close() error -} - -type storage struct { - *wal.WAL - *snap.Snapshotter -} - -func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage { - return &storage{w, s} -} - -// SaveSnap saves the snapshot to disk and release the locked -// wal files since they will not be used. -func (st *storage) SaveSnap(snap raftpb.Snapshot) error { - walsnap := walpb.Snapshot{ - Index: snap.Metadata.Index, - Term: snap.Metadata.Term, - } - err := st.WAL.SaveSnapshot(walsnap) - if err != nil { - return err - } - err = st.Snapshotter.SaveSnap(snap) - if err != nil { - return err - } - return st.WAL.ReleaseLockTo(snap.Metadata.Index) -} - -func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { - var ( - err error - wmetadata []byte - ) - - repaired := false - for { - if w, err = wal.Open(waldir, snap); err != nil { - plog.Fatalf("open wal error: %v", err) - } - if wmetadata, st, ents, err = w.ReadAll(); err != nil { - w.Close() - // we can only repair ErrUnexpectedEOF and we never repair twice. 
- if repaired || err != io.ErrUnexpectedEOF { - plog.Fatalf("read wal error (%v) and cannot be repaired", err) - } - if !wal.Repair(waldir) { - plog.Fatalf("WAL error (%v) cannot be repaired", err) - } else { - plog.Infof("repaired WAL error (%v)", err) - repaired = true - } - continue - } - break - } - var metadata pb.Metadata - pbutil.MustUnmarshal(&metadata, wmetadata) - id = types.ID(metadata.NodeID) - cid = types.ID(metadata.ClusterID) - return -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go deleted file mode 100644 index e3896ffc2d3dd..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "time" - - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/rafthttp" -) - -// isConnectedToQuorumSince checks whether the local member is connected to the -// quorum of the cluster since the given time. -func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { - return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1 -} - -// isConnectedSince checks whether the local member is connected to the -// remote member since the given time. -func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool { - t := transport.ActiveSince(remote) - return !t.IsZero() && t.Before(since) -} - -// isConnectedFullySince checks whether the local member is connected to all -// members in the cluster since the given time. -func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { - return numConnectedSince(transport, since, self, members) == len(members) -} - -// numConnectedSince counts how many members are connected to the local member -// since the given time. -func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int { - connectedNum := 0 - for _, m := range members { - if m.ID == self || isConnectedSince(transport, since, m.ID) { - connectedNum++ - } - } - return connectedNum -} - -// longestConnected chooses the member with longest active-since-time. -// It returns false, if nothing is active. 
-func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) { - var longest types.ID - var oldest time.Time - for _, id := range membs { - tm := tp.ActiveSince(id) - if tm.IsZero() { // inactive - continue - } - - if oldest.IsZero() { // first longest candidate - oldest = tm - longest = id - } - - if tm.Before(oldest) { - oldest = tm - longest = id - } - } - if uint64(longest) == 0 { - return longest, false - } - return longest, true -} - -type notifier struct { - c chan struct{} - err error -} - -func newNotifier() *notifier { - return ¬ifier{ - c: make(chan struct{}), - } -} - -func (nc *notifier) notify(err error) { - nc.err = err - close(nc.c) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go deleted file mode 100644 index 72c4eb7c5cc38..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" -) - -type v2API interface { - Post(ctx context.Context, r *pb.Request) (Response, error) - Put(ctx context.Context, r *pb.Request) (Response, error) - Delete(ctx context.Context, r *pb.Request) (Response, error) - QGet(ctx context.Context, r *pb.Request) (Response, error) - Get(ctx context.Context, r *pb.Request) (Response, error) - Head(ctx context.Context, r *pb.Request) (Response, error) -} - -type v2apiStore struct{ s *EtcdServer } - -func (a *v2apiStore) Post(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) Put(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) Delete(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) QGet(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Response, error) { - data, err := r.Marshal() - if err != nil { - return Response{}, err - } - ch := a.s.w.Register(r.ID) - - start := time.Now() - a.s.r.Propose(ctx, data) - proposalsPending.Inc() - defer proposalsPending.Dec() - - select { - case x := <-ch: - resp := x.(Response) - return resp, resp.err - case <-ctx.Done(): - proposalsFailed.Inc() - a.s.w.Trigger(r.ID, nil) // GC wait - return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) - case <-a.s.stopping: - } - return Response{}, ErrStopped -} - -func (a *v2apiStore) Get(ctx context.Context, r *pb.Request) (Response, error) { - if r.Wait { - wc, err := a.s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since) - if err != nil { - return Response{}, err - } - return Response{Watcher: wc}, nil - } - ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) - if err != nil 
{ - return Response{}, err - } - return Response{Event: ev}, nil -} - -func (a *v2apiStore) Head(ctx context.Context, r *pb.Request) (Response, error) { - ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) - if err != nil { - return Response{}, err - } - return Response{Event: ev}, nil -} - -// Do interprets r and performs an operation on s.store according to r.Method -// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with -// Quorum == true, r will be sent through consensus before performing its -// respective operation. Do will block until an action is performed or there is -// an error. -func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { - r.ID = s.reqIDGen.Next() - if r.Method == "GET" && r.Quorum { - r.Method = "QGET" - } - v2api := (v2API)(&v2apiStore{s}) - switch r.Method { - case "POST": - return v2api.Post(ctx, &r) - case "PUT": - return v2api.Put(ctx, &r) - case "DELETE": - return v2api.Delete(ctx, &r) - case "QGET": - return v2api.QGet(ctx, &r) - case "GET": - return v2api.Get(ctx, &r) - case "HEAD": - return v2api.Head(ctx, &r) - } - return Response{}, ErrUnknownMethod -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go deleted file mode 100644 index ae449bbf22ff4..0000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ /dev/null @@ -1,686 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "bytes" - "encoding/binary" - "time" - - "github.com/gogo/protobuf/proto" - - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/lease/leasehttp" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/raft" - - "golang.org/x/net/context" -) - -const ( - // In the health case, there might be a small gap (10s of entries) between - // the applied index and committed index. - // However, if the committed entries are very heavy to apply, the gap might grow. - // We should stop accepting new proposals if the gap growing to a certain point. - maxGapBetweenApplyAndCommitIndex = 5000 -) - -type RaftKV interface { - Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) - Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) - DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) - Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) -} - -type Lessor interface { - // LeaseGrant sends LeaseGrant request to raft and apply it after committed. - LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - // LeaseRevoke sends LeaseRevoke request to raft and apply it after committed. 
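The removed v2_server.go routes a pb.Request by Method, first rewriting a quorum GET into QGET so it goes through consensus like a write. A self-contained sketch of that dispatch shape, using placeholder request and response types rather than etcd's protobufs:

package main

import (
	"errors"
	"fmt"
)

// request is a stand-in for etcd's pb.Request; only the routed fields appear.
type request struct {
	Method string
	Quorum bool
}

var errUnknownMethod = errors.New("unknown method")

// do mirrors the dispatch shape of the removed EtcdServer.Do: a quorum GET
// becomes QGET and is proposed through raft; plain GET/HEAD are served locally.
func do(r request) (string, error) {
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		return "proposed through raft: " + r.Method, nil
	case "GET", "HEAD":
		return "served from local store: " + r.Method, nil
	default:
		return "", errUnknownMethod
	}
}

func main() {
	out, _ := do(request{Method: "GET", Quorum: true})
	fmt.Println(out) // proposed through raft: QGET
}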
- LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - // LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error - // is returned. - LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) - - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) -} - -type Authenticator interface { - AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) - AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) - Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) - UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) -} - -func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if !r.Serializable { - err := s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - } - var resp *pb.RangeResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) - } - get := func() { resp, err = s.applyV3Base.Range(nil, r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err -} - -func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) - if err != nil { - return nil, err - } - return resp.(*pb.PutResponse), nil -} - -func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) - if err != nil { - return nil, err - } - return resp.(*pb.DeleteRangeResponse), nil -} - -func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if isTxnReadonly(r) { - if !isTxnSerializable(r) { - err := s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - } - var resp *pb.TxnResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) - } - get := func() { resp, err = s.applyV3Base.Txn(r) } - if serr 
:= s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) - if err != nil { - return nil, err - } - return resp.(*pb.TxnResponse), nil -} - -func isTxnSerializable(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - return true -} - -func isTxnReadonly(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil { - return false - } - } - return true -} - -func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) - if r.Physical && result != nil && result.physc != nil { - <-result.physc - // The compaction is done deleting keys; the hash is now settled - // but the data is not necessarily committed. If there's a crash, - // the hash may revert to a hash prior to compaction completing - // if the compaction resumes. Force the finished compaction to - // commit so it won't resume following a crash. - s.be.ForceCommit() - } - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - resp := result.resp.(*pb.CompactionResponse) - if resp == nil { - resp = &pb.CompactionResponse{} - } - if resp.Header == nil { - resp.Header = &pb.ResponseHeader{} - } - resp.Header.Revision = s.kv.Rev() - return resp, nil -} - -func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - // no id given? 
choose one - for r.ID == int64(lease.NoLease) { - // only use positive int64 id's - r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) - } - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) - if err != nil { - return nil, err - } - return resp.(*pb.LeaseGrantResponse), nil -} - -func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) - if err != nil { - return nil, err - } - return resp.(*pb.LeaseRevokeResponse), nil -} - -func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { - ttl, err := s.lessor.Renew(id) - if err == nil { // already requested to primary lessor(leader) - return ttl, nil - } - if err != lease.ErrNotPrimary { - return -1, err - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - // renewals don't go through raft; forward to leader manually - for cctx.Err() == nil && err != nil { - leader, lerr := s.waitLeader(cctx) - if lerr != nil { - return -1, lerr - } - for _, url := range leader.PeerURLs { - lurl := url + leasehttp.LeasePrefix - ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt) - if err == nil || err == lease.ErrLeaseNotFound { - return ttl, err - } - } - } - return -1, ErrTimeout -} - -func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - if s.Leader() == s.ID() { - // primary; timetolive directly from leader - le := s.lessor.Lookup(lease.LeaseID(r.ID)) - if le == nil { - return nil, lease.ErrLeaseNotFound - } - // TODO: fill out ResponseHeader - resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()} - if r.Keys { - ks := le.Keys() - kbs := make([][]byte, len(ks)) - for i := range ks { - kbs[i] = []byte(ks[i]) - } - resp.Keys = kbs - } - return resp, nil - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - // forward to leader - for cctx.Err() == nil { - leader, err := s.waitLeader(cctx) - if err != nil { - return nil, err - } - for _, url := range leader.PeerURLs { - lurl := url + leasehttp.LeaseInternalPrefix - resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt) - if err == nil { - return resp.LeaseTimeToLiveResponse, nil - } - if err == lease.ErrLeaseNotFound { - return nil, err - } - } - } - return nil, ErrTimeout -} - -func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { - leader := s.cluster.Member(s.Leader()) - for leader == nil { - // wait an election - dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond - select { - case <-time.After(dur): - leader = s.cluster.Member(s.Leader()) - case <-s.stopping: - return nil, ErrStopped - case <-ctx.Done(): - return nil, ErrNoLeader - } - } - if leader == nil || len(leader.PeerURLs) == 0 { - return nil, ErrNoLeader - } - return leader, nil -} - -func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AlarmResponse), nil -} - -func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) - if err != 
nil { - return nil, err - } - return resp.(*pb.AuthEnableResponse), nil -} - -func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthDisableResponse), nil -} - -func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - if err := s.linearizableReadNotify(ctx); err != nil { - return nil, err - } - - var resp proto.Message - for { - checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) - if err != nil { - if err != auth.ErrAuthNotEnabled { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) - } - return nil, err - } - - st, err := s.AuthStore().GenTokenPrefix() - if err != nil { - return nil, err - } - - internalReq := &pb.InternalAuthenticateRequest{ - Name: r.Name, - Password: r.Password, - SimpleToken: st, - } - - resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) - if err != nil { - return nil, err - } - if checkedRevision == s.AuthStore().Revision() { - break - } - plog.Infof("revision when password checked is obsolete, retrying") - } - - return resp.(*pb.AuthenticateResponse), nil -} - -func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserAddResponse), nil -} - -func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserDeleteResponse), nil -} - -func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserChangePasswordResponse), nil -} - -func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserGrantRoleResponse), nil -} - -func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserGetResponse), nil -} - -func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserListResponse), nil -} - -func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserRevokeRoleResponse), nil -} - -func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := 
s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleAddResponse), nil -} - -func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleGrantPermissionResponse), nil -} - -func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleGetResponse), nil -} - -func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleListResponse), nil -} - -func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleRevokePermissionResponse), nil -} - -func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleDeleteResponse), nil -} - -func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - result, err := s.processInternalRaftRequestOnce(ctx, r) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp, nil -} - -func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - for { - resp, err := s.raftRequestOnce(ctx, r) - if err != auth.ErrAuthOldRevision { - return resp, err - } - } -} - -// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. -func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { - for { - ai, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - if ai == nil { - // chk expects non-nil AuthInfo; use empty credentials - ai = &auth.AuthInfo{} - } - if err = chk(ai); err != nil { - if err == auth.ErrAuthOldRevision { - continue - } - return err - } - // fetch response for serialized request - get() - // empty credentials or current auth info means no need to retry - if ai.Revision == 0 || ai.Revision == s.authStore.Revision() { - return nil - } - // avoid TOCTOU error, retry of the request is required. 
- } -} - -func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { - ai := s.getAppliedIndex() - ci := s.getCommittedIndex() - if ci > ai+maxGapBetweenApplyAndCommitIndex { - return nil, ErrTooManyRequests - } - - r.Header = &pb.RequestHeader{ - ID: s.reqIDGen.Next(), - } - - authInfo, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return nil, err - } - if authInfo != nil { - r.Header.Username = authInfo.Username - r.Header.AuthRevision = authInfo.Revision - } - - data, err := r.Marshal() - if err != nil { - return nil, err - } - - if len(data) > int(s.Cfg.MaxRequestBytes) { - return nil, ErrRequestTooLarge - } - - id := r.ID - if id == 0 { - id = r.Header.ID - } - ch := s.w.Register(id) - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - start := time.Now() - s.r.Propose(cctx, data) - proposalsPending.Inc() - defer proposalsPending.Dec() - - select { - case x := <-ch: - return x.(*applyResult), nil - case <-cctx.Done(): - proposalsFailed.Inc() - s.w.Trigger(id, nil) // GC wait - return nil, s.parseProposeCtxErr(cctx.Err(), start) - case <-s.done: - return nil, ErrStopped - } -} - -// Watchable returns a watchable interface attached to the etcdserver. -func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } - -func (s *EtcdServer) linearizableReadLoop() { - var rs raft.ReadState - - for { - ctx := make([]byte, 8) - binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next()) - - select { - case <-s.readwaitc: - case <-s.stopping: - return - } - - nextnr := newNotifier() - - s.readMu.Lock() - nr := s.readNotifier - s.readNotifier = nextnr - s.readMu.Unlock() - - cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - if err := s.r.ReadIndex(cctx, ctx); err != nil { - cancel() - if err == raft.ErrStopped { - return - } - plog.Errorf("failed to get read index from raft: %v", err) - nr.notify(err) - continue - } - cancel() - - var ( - timeout bool - done bool - ) - for !timeout && !done { - select { - case rs = <-s.r.readStateC: - done = bytes.Equal(rs.RequestCtx, ctx) - if !done { - // a previous request might time out. now we should ignore the response of it and - // continue waiting for the response of the current requests. 
- plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx) - } - case <-time.After(s.Cfg.ReqTimeout()): - plog.Warningf("timed out waiting for read index response") - nr.notify(ErrTimeout) - timeout = true - case <-s.stopping: - return - } - } - if !done { - continue - } - - if ai := s.getAppliedIndex(); ai < rs.Index { - select { - case <-s.applyWait.Wait(rs.Index): - case <-s.stopping: - return - } - } - // unblock all l-reads requested at indices before rs.Index - nr.notify(nil) - } -} - -func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { - s.readMu.RLock() - nc := s.readNotifier - s.readMu.RUnlock() - - // signal linearizable loop for current notify if it hasn't been already - select { - case s.readwaitc <- struct{}{}: - default: - } - - // wait for read state notification - select { - case <-nc.c: - return nc.err - case <-ctx.Done(): - return ctx.Err() - case <-s.done: - return ErrStopped - } -} - -func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { - if s.Cfg.ClientCertAuthEnabled { - authInfo := s.AuthStore().AuthInfoFromTLS(ctx) - if authInfo != nil { - return authInfo, nil - } - } - - return s.AuthStore().AuthInfoFromCtx(ctx) -} diff --git a/vendor/github.com/coreos/etcd/main.go b/vendor/github.com/coreos/etcd/main.go deleted file mode 100644 index 0b7357376e675..0000000000000 --- a/vendor/github.com/coreos/etcd/main.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package main is a simple wrapper of the real etcd entrypoint package -// (located at github.com/coreos/etcd/etcdmain) to ensure that etcd is still -// "go getable"; e.g. `go get github.com/coreos/etcd` works as expected and -// builds a binary in $GOBIN/etcd -// -// This package should NOT be extended or modified in any way; to modify the -// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package. -// -package main - -import "github.com/coreos/etcd/etcdmain" - -func main() { - etcdmain.Main() -} diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/github.com/coreos/etcd/mvcc/doc.go deleted file mode 100644 index ad5be03086fbf..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mvcc defines etcd's stable MVCC storage. 
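The removed linearizable-read code pairs a non-blocking signal channel (readwaitc) with a one-shot notifier that is swapped under a mutex, so every caller waiting on the current notifier is released by a single ReadIndex round. A stripped-down sketch of that signal/notify pattern, with the raft ReadIndex step omitted and all names invented for illustration:

package main

import (
	"fmt"
	"sync"
)

// notifier is a one-shot broadcast: close(c) releases every waiter.
type notifier struct {
	c   chan struct{}
	err error
}

func newNotifier() *notifier { return &notifier{c: make(chan struct{})} }

func (n *notifier) notify(err error) { n.err = err; close(n.c) }

type readLoop struct {
	mu       sync.Mutex
	notifier *notifier
	waitc    chan struct{}
}

// request captures the notifier current at call time, then signals the loop
// without blocking (like the send on readwaitc).
func (l *readLoop) request() *notifier {
	l.mu.Lock()
	n := l.notifier
	l.mu.Unlock()
	select {
	case l.waitc <- struct{}{}:
	default: // a round is already pending; piggyback on it
	}
	return n
}

// run answers one batch of waiters per signal; the real loop issues a raft
// ReadIndex and waits for the applied index to catch up before notifying.
func (l *readLoop) run() {
	for range l.waitc {
		l.mu.Lock()
		n := l.notifier
		l.notifier = newNotifier()
		l.mu.Unlock()
		n.notify(nil) // release everyone who asked before the swap
	}
}

func main() {
	l := &readLoop{notifier: newNotifier(), waitc: make(chan struct{}, 1)}
	go l.run()
	n := l.request()
	<-n.c
	fmt.Println("linearizable read may proceed, err =", n.err)
}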
-package mvcc diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go deleted file mode 100644 index 991289cdd5c4d..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/index.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "sort" - "sync" - - "github.com/google/btree" -) - -type index interface { - Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) - Range(key, end []byte, atRev int64) ([][]byte, []revision) - Put(key []byte, rev revision) - Tombstone(key []byte, rev revision) error - RangeSince(key, end []byte, rev int64) []revision - Compact(rev int64) map[revision]struct{} - Equal(b index) bool - - Insert(ki *keyIndex) - KeyIndex(ki *keyIndex) *keyIndex -} - -type treeIndex struct { - sync.RWMutex - tree *btree.BTree -} - -func newTreeIndex() index { - return &treeIndex{ - tree: btree.New(32), - } -} - -func (ti *treeIndex) Put(key []byte, rev revision) { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - item := ti.tree.Get(keyi) - if item == nil { - keyi.put(rev.main, rev.sub) - ti.tree.ReplaceOrInsert(keyi) - return - } - okeyi := item.(*keyIndex) - okeyi.put(rev.main, rev.sub) -} - -func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { - keyi := &keyIndex{key: key} - ti.RLock() - defer ti.RUnlock() - if keyi = ti.keyIndex(keyi); keyi == nil { - return revision{}, revision{}, 0, ErrRevisionNotFound - } - return keyi.get(atRev) -} - -func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { - ti.RLock() - defer ti.RUnlock() - return ti.keyIndex(keyi) -} - -func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { - if item := ti.tree.Get(keyi); item != nil { - return item.(*keyIndex) - } - return nil -} - -func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { - if end == nil { - rev, _, _, err := ti.Get(key, atRev) - if err != nil { - return nil, nil - } - return [][]byte{key}, []revision{rev} - } - - keyi := &keyIndex{key: key} - endi := &keyIndex{key: end} - - ti.RLock() - defer ti.RUnlock() - - ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { - if len(endi.key) > 0 && !item.Less(endi) { - return false - } - curKeyi := item.(*keyIndex) - rev, _, _, err := curKeyi.get(atRev) - if err != nil { - return true - } - revs = append(revs, rev) - keys = append(keys, curKeyi.key) - return true - }) - - return keys, revs -} - -func (ti *treeIndex) Tombstone(key []byte, rev revision) error { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - item := ti.tree.Get(keyi) - if item == nil { - return ErrRevisionNotFound - } - - ki := item.(*keyIndex) - return ki.tombstone(rev.main, rev.sub) -} - -// RangeSince returns all revisions from key(including) to end(excluding) -// at or after the given rev. The returned slice is sorted in the order -// of revision. 
-func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { - ti.RLock() - defer ti.RUnlock() - - keyi := &keyIndex{key: key} - if end == nil { - item := ti.tree.Get(keyi) - if item == nil { - return nil - } - keyi = item.(*keyIndex) - return keyi.since(rev) - } - - endi := &keyIndex{key: end} - var revs []revision - ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { - if len(endi.key) > 0 && !item.Less(endi) { - return false - } - curKeyi := item.(*keyIndex) - revs = append(revs, curKeyi.since(rev)...) - return true - }) - sort.Sort(revisions(revs)) - - return revs -} - -func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { - available := make(map[revision]struct{}) - var emptyki []*keyIndex - plog.Printf("store.index: compact %d", rev) - // TODO: do not hold the lock for long time? - // This is probably OK. Compacting 10M keys takes O(10ms). - ti.Lock() - defer ti.Unlock() - ti.tree.Ascend(compactIndex(rev, available, &emptyki)) - for _, ki := range emptyki { - item := ti.tree.Delete(ki) - if item == nil { - plog.Panic("store.index: unexpected delete failure during compaction") - } - } - return available -} - -func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool { - return func(i btree.Item) bool { - keyi := i.(*keyIndex) - keyi.compact(rev, available) - if keyi.isEmpty() { - *emptyki = append(*emptyki, keyi) - } - return true - } -} - -func (a *treeIndex) Equal(bi index) bool { - b := bi.(*treeIndex) - - if a.tree.Len() != b.tree.Len() { - return false - } - - equal := true - - a.tree.Ascend(func(item btree.Item) bool { - aki := item.(*keyIndex) - bki := b.tree.Get(item).(*keyIndex) - if !aki.equal(bki) { - equal = false - return false - } - return true - }) - - return equal -} - -func (ti *treeIndex) Insert(ki *keyIndex) { - ti.Lock() - defer ti.Unlock() - ti.tree.ReplaceOrInsert(ki) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go deleted file mode 100644 index 9104f9b2d36a6..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/key_index.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "errors" - "fmt" - - "github.com/google/btree" -) - -var ( - ErrRevisionNotFound = errors.New("mvcc: revision not found") -) - -// keyIndex stores the revisions of a key in the backend. -// Each keyIndex has at least one key generation. -// Each generation might have several key versions. -// Tombstone on a key appends an tombstone version at the end -// of the current generation and creates a new empty generation. -// Each version of a key has an index pointing to the backend. 
-// -// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo" -// generate a keyIndex: -// key: "foo" -// rev: 5 -// generations: -// {empty} -// {4.0, 5.0(t)} -// {1.0, 2.0, 3.0(t)} -// -// Compact a keyIndex removes the versions with smaller or equal to -// rev except the largest one. If the generation becomes empty -// during compaction, it will be removed. if all the generations get -// removed, the keyIndex should be removed. - -// For example: -// compact(2) on the previous example -// generations: -// {empty} -// {4.0, 5.0(t)} -// {2.0, 3.0(t)} -// -// compact(4) -// generations: -// {empty} -// {4.0, 5.0(t)} -// -// compact(5): -// generations: -// {empty} -> key SHOULD be removed. -// -// compact(6): -// generations: -// {empty} -> key SHOULD be removed. -type keyIndex struct { - key []byte - modified revision // the main rev of the last modification - generations []generation -} - -// put puts a revision to the keyIndex. -func (ki *keyIndex) put(main int64, sub int64) { - rev := revision{main: main, sub: sub} - - if !rev.GreaterThan(ki.modified) { - plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) - } - if len(ki.generations) == 0 { - ki.generations = append(ki.generations, generation{}) - } - g := &ki.generations[len(ki.generations)-1] - if len(g.revs) == 0 { // create a new key - keysGauge.Inc() - g.created = rev - } - g.revs = append(g.revs, rev) - g.ver++ - ki.modified = rev -} - -func (ki *keyIndex) restore(created, modified revision, ver int64) { - if len(ki.generations) != 0 { - plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") - } - - ki.modified = modified - g := generation{created: created, ver: ver, revs: []revision{modified}} - ki.generations = append(ki.generations, g) - keysGauge.Inc() -} - -// tombstone puts a revision, pointing to a tombstone, to the keyIndex. -// It also creates a new empty generation in the keyIndex. -// It returns ErrRevisionNotFound when tombstone on an empty generation. -func (ki *keyIndex) tombstone(main int64, sub int64) error { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) - } - if ki.generations[len(ki.generations)-1].isEmpty() { - return ErrRevisionNotFound - } - ki.put(main, sub) - ki.generations = append(ki.generations, generation{}) - keysGauge.Dec() - return nil -} - -// get gets the modified, created revision and version of the key that satisfies the given atRev. -// Rev must be higher than or equal to the given atRev. -func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) - } - g := ki.findGeneration(atRev) - if g.isEmpty() { - return revision{}, revision{}, 0, ErrRevisionNotFound - } - - n := g.walk(func(rev revision) bool { return rev.main > atRev }) - if n != -1 { - return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil - } - - return revision{}, revision{}, 0, ErrRevisionNotFound -} - -// since returns revisions since the given rev. Only the revision with the -// largest sub revision will be returned if multiple revisions have the same -// main revision. 
-func (ki *keyIndex) since(rev int64) []revision { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) - } - since := revision{rev, 0} - var gi int - // find the generations to start checking - for gi = len(ki.generations) - 1; gi > 0; gi-- { - g := ki.generations[gi] - if g.isEmpty() { - continue - } - if since.GreaterThan(g.created) { - break - } - } - - var revs []revision - var last int64 - for ; gi < len(ki.generations); gi++ { - for _, r := range ki.generations[gi].revs { - if since.GreaterThan(r) { - continue - } - if r.main == last { - // replace the revision with a new one that has higher sub value, - // because the original one should not be seen by external - revs[len(revs)-1] = r - continue - } - revs = append(revs, r) - last = r.main - } - } - return revs -} - -// compact compacts a keyIndex by removing the versions with smaller or equal -// revision than the given atRev except the largest one (If the largest one is -// a tombstone, it will not be kept). -// If a generation becomes empty during compaction, it will be removed. -func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) - } - - // walk until reaching the first revision that has an revision smaller or equal to - // the atRev. - // add it to the available map - f := func(rev revision) bool { - if rev.main <= atRev { - available[rev] = struct{}{} - return false - } - return true - } - - i, g := 0, &ki.generations[0] - // find first generation includes atRev or created after atRev - for i < len(ki.generations)-1 { - if tomb := g.revs[len(g.revs)-1].main; tomb > atRev { - break - } - i++ - g = &ki.generations[i] - } - - if !g.isEmpty() { - n := g.walk(f) - // remove the previous contents. - if n != -1 { - g.revs = g.revs[n:] - } - // remove any tombstone - if len(g.revs) == 1 && i != len(ki.generations)-1 { - delete(available, g.revs[0]) - i++ - } - } - // remove the previous generations. - ki.generations = ki.generations[i:] -} - -func (ki *keyIndex) isEmpty() bool { - return len(ki.generations) == 1 && ki.generations[0].isEmpty() -} - -// findGeneration finds out the generation of the keyIndex that the -// given rev belongs to. If the given rev is at the gap of two generations, -// which means that the key does not exist at the given rev, it returns nil. -func (ki *keyIndex) findGeneration(rev int64) *generation { - lastg := len(ki.generations) - 1 - cg := lastg - - for cg >= 0 { - if len(ki.generations[cg].revs) == 0 { - cg-- - continue - } - g := ki.generations[cg] - if cg != lastg { - if tomb := g.revs[len(g.revs)-1].main; tomb <= rev { - return nil - } - } - if g.revs[0].main <= rev { - return &ki.generations[cg] - } - cg-- - } - return nil -} - -func (a *keyIndex) Less(b btree.Item) bool { - return bytes.Compare(a.key, b.(*keyIndex).key) == -1 -} - -func (a *keyIndex) equal(b *keyIndex) bool { - if !bytes.Equal(a.key, b.key) { - return false - } - if a.modified != b.modified { - return false - } - if len(a.generations) != len(b.generations) { - return false - } - for i := range a.generations { - ag, bg := a.generations[i], b.generations[i] - if !ag.equal(bg) { - return false - } - } - return true -} - -func (ki *keyIndex) String() string { - var s string - for _, g := range ki.generations { - s += g.String() - } - return s -} - -// generation contains multiple revisions of a key. 
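The keyIndex comment above walks through put(1.0); put(2.0); tombstone(3.0); put(4.0); tombstone(5.0) on "foo" producing three generations. A toy model of that bookkeeping (plain main/sub pairs, no btree, no compaction; deliberately much simpler than the removed implementation) reproduces the documented layout:

package main

import "fmt"

// rev is a simplified main.sub revision.
type rev struct{ main, sub int64 }

// generation is one lifetime of a key: a run of revisions ended by a tombstone.
type generation struct{ revs []rev }

type keyIndex struct {
	key         string
	generations []generation
}

func (ki *keyIndex) put(main, sub int64) {
	if len(ki.generations) == 0 {
		ki.generations = append(ki.generations, generation{})
	}
	g := &ki.generations[len(ki.generations)-1]
	g.revs = append(g.revs, rev{main, sub})
}

// tombstone records the deletion revision and opens a new, empty generation.
func (ki *keyIndex) tombstone(main, sub int64) {
	ki.put(main, sub)
	ki.generations = append(ki.generations, generation{})
}

func main() {
	ki := &keyIndex{key: "foo"}
	ki.put(1, 0)
	ki.put(2, 0)
	ki.tombstone(3, 0)
	ki.put(4, 0)
	ki.tombstone(5, 0)
	for i := len(ki.generations) - 1; i >= 0; i-- {
		fmt.Printf("generation %d: %v\n", i, ki.generations[i].revs)
	}
	// Output (newest generation first, matching the deleted doc comment):
	// generation 2: []
	// generation 1: [{4 0} {5 0}]
	// generation 0: [{1 0} {2 0} {3 0}]
}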
-type generation struct { - ver int64 - created revision // when the generation is created (put in first revision). - revs []revision -} - -func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 } - -// walk walks through the revisions in the generation in descending order. -// It passes the revision to the given function. -// walk returns until: 1. it finishes walking all pairs 2. the function returns false. -// walk returns the position at where it stopped. If it stopped after -// finishing walking, -1 will be returned. -func (g *generation) walk(f func(rev revision) bool) int { - l := len(g.revs) - for i := range g.revs { - ok := f(g.revs[l-i-1]) - if !ok { - return l - i - 1 - } - } - return -1 -} - -func (g *generation) String() string { - return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs) -} - -func (a generation) equal(b generation) bool { - if a.ver != b.ver { - return false - } - if len(a.revs) != len(b.revs) { - return false - } - - for i := range a.revs { - ar, br := a.revs[i], b.revs[i] - if ar != br { - return false - } - } - return true -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go deleted file mode 100644 index 6636347aa4310..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kv.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type RangeOptions struct { - Limit int64 - Rev int64 - Count bool -} - -type RangeResult struct { - KVs []mvccpb.KeyValue - Rev int64 - Count int -} - -type ReadView interface { - // FirstRev returns the first KV revision at the time of opening the txn. - // After a compaction, the first revision increases to the compaction - // revision. - FirstRev() int64 - - // Rev returns the revision of the KV at the time of opening the txn. - Rev() int64 - - // Range gets the keys in the range at rangeRev. - // The returned rev is the current revision of the KV when the operation is executed. - // If rangeRev <=0, range gets the keys at currentRev. - // If `end` is nil, the request returns the key. - // If `end` is not nil and not empty, it gets the keys in range [key, range_end). - // If `end` is not nil and empty, it gets the keys greater than or equal to key. - // Limit limits the number of keys returned. - // If the required rev is compacted, ErrCompacted will be returned. - Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) -} - -// TxnRead represents a read-only transaction with operations that will not -// block other read transactions. -type TxnRead interface { - ReadView - // End marks the transaction is complete and ready to commit. - End() -} - -type WriteView interface { - // DeleteRange deletes the given range from the store. - // A deleteRange increases the rev of the store if any key in the range exists. 
- // The number of key deleted will be returned. - // The returned rev is the current revision of the KV when the operation is executed. - // It also generates one event for each key delete in the event history. - // if the `end` is nil, deleteRange deletes the key. - // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). - DeleteRange(key, end []byte) (n, rev int64) - - // Put puts the given key, value into the store. Put also takes additional argument lease to - // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease - // id. - // A put also increases the rev of the store, and generates one event in the event history. - // The returned rev is the current revision of the KV when the operation is executed. - Put(key, value []byte, lease lease.LeaseID) (rev int64) -} - -// TxnWrite represents a transaction that can modify the store. -type TxnWrite interface { - TxnRead - WriteView - // Changes gets the changes made since opening the write txn. - Changes() []mvccpb.KeyValue -} - -// txnReadWrite coerces a read txn to a write, panicking on any write operation. -type txnReadWrite struct{ TxnRead } - -func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } -func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - panic("unexpected Put") -} -func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } - -func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } - -type KV interface { - ReadView - WriteView - - // Read creates a read transaction. - Read() TxnRead - - // Write creates a write transaction. - Write() TxnWrite - - // Hash retrieves the hash of KV state and revision. - // This method is designed for consistency checking purposes. - Hash() (hash uint32, revision int64, err error) - - // Compact frees all superseded keys with revisions less than rev. - Compact(rev int64) (<-chan struct{}, error) - - // Commit commits outstanding txns into the underlying backend. - Commit() - - // Restore restores the KV store from a backend. - Restore(b backend.Backend) error - Close() error -} - -// WatchableKV is a KV that can be watched. -type WatchableKV interface { - KV - Watchable -} - -// Watchable is the interface that wraps the NewWatchStream function. -type Watchable interface { - // NewWatchStream returns a WatchStream that can be used to - // watch events happened or happening on the KV. - NewWatchStream() WatchStream -} - -// ConsistentWatchableKV is a WatchableKV that understands the consistency -// algorithm and consistent index. -// If the consistent index of executing entry is not larger than the -// consistent index of ConsistentWatchableKV, all operations in -// this entry are skipped and return empty response. -type ConsistentWatchableKV interface { - WatchableKV - // ConsistentIndex returns the current consistent index of the KV. - ConsistentIndex() uint64 -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go deleted file mode 100644 index f40ba8edc22b5..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kv_view.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "github.com/coreos/etcd/lease" -) - -type readView struct{ kv KV } - -func (rv *readView) FirstRev() int64 { - tr := rv.kv.Read() - defer tr.End() - return tr.FirstRev() -} - -func (rv *readView) Rev() int64 { - tr := rv.kv.Read() - defer tr.End() - return tr.Rev() -} - -func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - tr := rv.kv.Read() - defer tr.End() - return tr.Range(key, end, ro) -} - -type writeView struct{ kv KV } - -func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { - tw := wv.kv.Write() - defer tw.End() - return tw.DeleteRange(key, end) -} - -func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw := wv.kv.Write() - defer tw.End() - return tw.Put(key, value, lease) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go deleted file mode 100644 index 28a508ccb9593..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "encoding/binary" - "errors" - "math" - "sync" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/schedule" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -var ( - keyBucketName = []byte("key") - metaBucketName = []byte("meta") - - consistentIndexKeyName = []byte("consistent_index") - scheduledCompactKeyName = []byte("scheduledCompactRev") - finishedCompactKeyName = []byte("finishedCompactRev") - - ErrCompacted = errors.New("mvcc: required revision has been compacted") - ErrFutureRev = errors.New("mvcc: required revision is a future revision") - ErrCanceled = errors.New("mvcc: watcher is canceled") - ErrClosed = errors.New("mvcc: closed") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") -) - -const ( - // markedRevBytesLen is the byte length of marked revision. - // The first `revBytesLen` bytes represents a normal revision. The last - // one byte is the mark. - markedRevBytesLen = revBytesLen + 1 - markBytePosition = markedRevBytesLen - 1 - markTombstone byte = 't' -) - -var restoreChunkKeys = 10000 // non-const for testing - -// ConsistentIndexGetter is an interface that wraps the Get method. -// Consistent index is the offset of an entry in a consistent replicated log. 
-type ConsistentIndexGetter interface { - // ConsistentIndex returns the consistent index of current executing entry. - ConsistentIndex() uint64 -} - -type store struct { - ReadView - WriteView - - // mu read locks for txns and write locks for non-txn store changes. - mu sync.RWMutex - - ig ConsistentIndexGetter - - b backend.Backend - kvindex index - - le lease.Lessor - - // revMuLock protects currentRev and compactMainRev. - // Locked at end of write txn and released after write txn unlock lock. - // Locked before locking read txn and released after locking. - revMu sync.RWMutex - // currentRev is the revision of the last completed transaction. - currentRev int64 - // compactMainRev is the main revision of the last compaction. - compactMainRev int64 - - // bytesBuf8 is a byte slice of length 8 - // to avoid a repetitive allocation in saveIndex. - bytesBuf8 []byte - - fifoSched schedule.Scheduler - - stopc chan struct{} -} - -// NewStore returns a new store. It is useful to create a store inside -// mvcc pkg. It should only be used for testing externally. -func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store { - s := &store{ - b: b, - ig: ig, - kvindex: newTreeIndex(), - - le: le, - - currentRev: 1, - compactMainRev: -1, - - bytesBuf8: make([]byte, 8), - fifoSched: schedule.NewFIFOScheduler(), - - stopc: make(chan struct{}), - } - s.ReadView = &readView{s} - s.WriteView = &writeView{s} - if s.le != nil { - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) - } - - tx := s.b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(keyBucketName) - tx.UnsafeCreateBucket(metaBucketName) - tx.Unlock() - s.b.ForceCommit() - - if err := s.restore(); err != nil { - // TODO: return the error instead of panic here? - panic("failed to recover store from backend") - } - - return s -} - -func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { - if ctx == nil || ctx.Err() != nil { - s.mu.Lock() - select { - case <-s.stopc: - default: - f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } - s.fifoSched.Schedule(f) - } - s.mu.Unlock() - return - } - close(ch) -} - -func (s *store) Hash() (hash uint32, revision int64, err error) { - s.b.ForceCommit() - h, err := s.b.Hash(DefaultIgnores) - return h, s.currentRev, err -} - -func (s *store) Compact(rev int64) (<-chan struct{}, error) { - s.mu.Lock() - defer s.mu.Unlock() - s.revMu.Lock() - defer s.revMu.Unlock() - - if rev <= s.compactMainRev { - ch := make(chan struct{}) - f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } - s.fifoSched.Schedule(f) - return ch, ErrCompacted - } - if rev > s.currentRev { - return nil, ErrFutureRev - } - - start := time.Now() - - s.compactMainRev = rev - - rbytes := newRevBytes() - revToBytes(revision{main: rev}, rbytes) - - tx := s.b.BatchTx() - tx.Lock() - tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes) - tx.Unlock() - // ensure that desired compaction is persisted - s.b.ForceCommit() - - keep := s.kvindex.Compact(rev) - ch := make(chan struct{}) - var j = func(ctx context.Context) { - if ctx.Err() != nil { - s.compactBarrier(ctx, ch) - return - } - if !s.scheduleCompaction(rev, keep) { - s.compactBarrier(nil, ch) - return - } - close(ch) - } - - s.fifoSched.Schedule(j) - - indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) - return ch, nil -} - -// DefaultIgnores is a map of keys to ignore in hash checking. 
-var DefaultIgnores map[backend.IgnoreKey]struct{} - -func init() { - DefaultIgnores = map[backend.IgnoreKey]struct{}{ - // consistent index might be changed due to v2 internal sync, which - // is not controllable by the user. - {Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {}, - } -} - -func (s *store) Commit() { - s.mu.Lock() - defer s.mu.Unlock() - - tx := s.b.BatchTx() - tx.Lock() - s.saveIndex(tx) - tx.Unlock() - s.b.ForceCommit() -} - -func (s *store) Restore(b backend.Backend) error { - s.mu.Lock() - defer s.mu.Unlock() - - close(s.stopc) - s.fifoSched.Stop() - - s.b = b - s.kvindex = newTreeIndex() - s.currentRev = 1 - s.compactMainRev = -1 - s.fifoSched = schedule.NewFIFOScheduler() - s.stopc = make(chan struct{}) - - return s.restore() -} - -func (s *store) restore() error { - reportDbTotalSizeInBytesMu.Lock() - b := s.b - reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } - reportDbTotalSizeInBytesMu.Unlock() - - min, max := newRevBytes(), newRevBytes() - revToBytes(revision{main: 1}, min) - revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) - - keyToLease := make(map[string]lease.LeaseID) - - // restore index - tx := s.b.BatchTx() - tx.Lock() - - _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) - if len(finishedCompactBytes) != 0 { - s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main - plog.Printf("restore compact to %d", s.compactMainRev) - } - _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main - } - - // index keys concurrently as they're loaded in from tx - keysGauge.Set(0) - rkvc, revc := restoreIntoIndex(s.kvindex) - for { - keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) - if len(keys) == 0 { - break - } - // rkvc blocks if the total pending keys exceeds the restore - // chunk size to keep keys from consuming too much memory. - restoreChunk(rkvc, keys, vals, keyToLease) - if len(keys) < restoreChunkKeys { - // partial set implies final set - break - } - // next set begins after where this one ended - newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) - newMin.sub++ - revToBytes(newMin, min) - } - close(rkvc) - s.currentRev = <-revc - - // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. - // the correct revision should be set to compaction revision in the case, not the largest revision - // we have seen. - if s.currentRev < s.compactMainRev { - s.currentRev = s.compactMainRev - } - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 - } - - for key, lid := range keyToLease { - if s.le == nil { - panic("no lessor to attach lease") - } - err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) - if err != nil { - plog.Errorf("unexpected Attach error: %v", err) - } - } - - tx.Unlock() - - if scheduledCompact != 0 { - s.Compact(scheduledCompact) - plog.Printf("resume scheduled compaction at %d", scheduledCompact) - } - - return nil -} - -type revKeyValue struct { - key []byte - kv mvccpb.KeyValue - kstr string -} - -func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { - rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) - go func() { - currentRev := int64(1) - defer func() { revc <- currentRev }() - // restore the tree index from streaming the unordered index. 
- kiCache := make(map[string]*keyIndex, restoreChunkKeys) - for rkv := range rkvc { - ki, ok := kiCache[rkv.kstr] - // purge kiCache if many keys but still missing in the cache - if !ok && len(kiCache) >= restoreChunkKeys { - i := 10 - for k := range kiCache { - delete(kiCache, k) - if i--; i == 0 { - break - } - } - } - // cache miss, fetch from tree index if there - if !ok { - ki = &keyIndex{key: rkv.kv.Key} - if idxKey := idx.KeyIndex(ki); idxKey != nil { - kiCache[rkv.kstr], ki = idxKey, idxKey - ok = true - } - } - rev := bytesToRev(rkv.key) - currentRev = rev.main - if ok { - if isTombstone(rkv.key) { - ki.tombstone(rev.main, rev.sub) - continue - } - ki.put(rev.main, rev.sub) - } else if !isTombstone(rkv.key) { - ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) - idx.Insert(ki) - kiCache[rkv.kstr] = ki - } - } - }() - return rkvc, revc -} - -func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { - for i, key := range keys { - rkv := revKeyValue{key: key} - if err := rkv.kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - rkv.kstr = string(rkv.kv.Key) - if isTombstone(key) { - delete(keyToLease, rkv.kstr) - } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { - keyToLease[rkv.kstr] = lid - } else { - delete(keyToLease, rkv.kstr) - } - kvc <- rkv - } -} - -func (s *store) Close() error { - close(s.stopc) - s.fifoSched.Stop() - return nil -} - -func (a *store) Equal(b *store) bool { - if a.currentRev != b.currentRev { - return false - } - if a.compactMainRev != b.compactMainRev { - return false - } - return a.kvindex.Equal(b.kvindex) -} - -func (s *store) saveIndex(tx backend.BatchTx) { - if s.ig == nil { - return - } - bs := s.bytesBuf8 - binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) - // put the index into the underlying backend - // tx has been locked in TxnBegin, so there is no need to lock it again - tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) -} - -func (s *store) ConsistentIndex() uint64 { - // TODO: cache index in a uint64 field? - tx := s.b.BatchTx() - tx.Lock() - defer tx.Unlock() - _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) - if len(vs) == 0 { - return 0 - } - return binary.BigEndian.Uint64(vs[0]) -} - -// appendMarkTombstone appends tombstone mark to normal revision bytes. -func appendMarkTombstone(b []byte) []byte { - if len(b) != revBytesLen { - plog.Panicf("cannot append mark to non normal revision bytes") - } - return append(b, markTombstone) -} - -// isTombstone checks whether the revision bytes is a tombstone. -func isTombstone(b []byte) bool { - return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone -} - -// revBytesRange returns the range of revision bytes at -// the given revision. -func revBytesRange(rev revision) (start, end []byte) { - start = newRevBytes() - revToBytes(rev, start) - - end = newRevBytes() - endRev := revision{main: rev.main, sub: rev.sub + 1} - revToBytes(endRev, end) - - return start, end -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go deleted file mode 100644 index bbd38f547f3fc..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "encoding/binary" - "time" -) - -func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { - totalStart := time.Now() - defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond)) - - end := make([]byte, 8) - binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) - - batchsize := int64(10000) - last := make([]byte, 8+1+8) - for { - var rev revision - - start := time.Now() - tx := s.b.BatchTx() - tx.Lock() - - keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize) - for _, key := range keys { - rev = bytesToRev(key) - if _, ok := keep[rev]; !ok { - tx.UnsafeDelete(keyBucketName, key) - } - } - - if len(keys) < int(batchsize) { - rbytes := make([]byte, 8+1+8) - revToBytes(revision{main: compactMainRev}, rbytes) - tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes) - tx.Unlock() - plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) - return true - } - - // update last - revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) - tx.Unlock() - dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) - - select { - case <-time.After(100 * time.Millisecond): - case <-s.stopc: - return false - } - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go deleted file mode 100644 index 13d4d530d0ae0..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
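The scheduleCompaction loop removed just above walks revision keys in batches of 10000, deletes every key that is not in the keep set, and sleeps 100ms between batches so normal traffic is not starved. A minimal standalone sketch of that pattern, with illustrative names (compactBatches, keep) and plain strings standing in for revision bytes:

package main

import (
	"fmt"
	"time"
)

// compactBatches visits revision keys in fixed-size batches, deleting every key
// that is not in the keep set and pausing between batches so other work can run.
func compactBatches(keys []string, keep map[string]bool, batch int) {
	for start := 0; start < len(keys); start += batch {
		end := start + batch
		if end > len(keys) {
			end = len(keys)
		}
		for _, k := range keys[start:end] {
			if !keep[k] {
				fmt.Println("delete", k) // stands in for tx.UnsafeDelete in the deleted code
			}
		}
		if end < len(keys) {
			time.Sleep(100 * time.Millisecond) // yield between batches, like the deleted loop
		}
	}
}

func main() {
	keep := map[string]bool{"rev-2": true}
	compactBatches([]string{"rev-1", "rev-2", "rev-3"}, keep, 2)
}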
- -package mvcc - -import ( - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type storeTxnRead struct { - s *store - tx backend.ReadTx - - firstRev int64 - rev int64 -} - -func (s *store) Read() TxnRead { - s.mu.RLock() - tx := s.b.ReadTx() - s.revMu.RLock() - tx.Lock() - firstRev, rev := s.compactMainRev, s.currentRev - s.revMu.RUnlock() - return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) -} - -func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } -func (tr *storeTxnRead) Rev() int64 { return tr.rev } - -func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - return tr.rangeKeys(key, end, tr.Rev(), ro) -} - -func (tr *storeTxnRead) End() { - tr.tx.Unlock() - tr.s.mu.RUnlock() -} - -type storeTxnWrite struct { - *storeTxnRead - tx backend.BatchTx - // beginRev is the revision where the txn begins; it will write to the next revision. - beginRev int64 - changes []mvccpb.KeyValue -} - -func (s *store) Write() TxnWrite { - s.mu.RLock() - tx := s.b.BatchTx() - tx.Lock() - tw := &storeTxnWrite{ - storeTxnRead: &storeTxnRead{s, tx, 0, 0}, - tx: tx, - beginRev: s.currentRev, - changes: make([]mvccpb.KeyValue, 0, 4), - } - return newMetricsTxnWrite(tw) -} - -func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } - -func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - rev := tw.beginRev - if len(tw.changes) > 0 { - rev++ - } - return tw.rangeKeys(key, end, rev, ro) -} - -func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { - if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { - return n, int64(tw.beginRev + 1) - } - return 0, int64(tw.beginRev) -} - -func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { - tw.put(key, value, lease) - return int64(tw.beginRev + 1) -} - -func (tw *storeTxnWrite) End() { - // only update index if the txn modifies the mvcc state. - if len(tw.changes) != 0 { - tw.s.saveIndex(tw.tx) - // hold revMu lock to prevent new read txns from opening until writeback. 
- tw.s.revMu.Lock() - tw.s.currentRev++ - } - tw.tx.Unlock() - if len(tw.changes) != 0 { - tw.s.revMu.Unlock() - } - tw.s.mu.RUnlock() -} - -func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { - rev := ro.Rev - if rev > curRev { - return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev - } - if rev <= 0 { - rev = curRev - } - if rev < tr.s.compactMainRev { - return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted - } - - _, revpairs := tr.s.kvindex.Range(key, end, int64(rev)) - if len(revpairs) == 0 { - return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil - } - if ro.Count { - return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil - } - - var kvs []mvccpb.KeyValue - for _, revpair := range revpairs { - start, end := revBytesRange(revpair) - _, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0) - if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) - } - - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - kvs = append(kvs, kv) - if ro.Limit > 0 && len(kvs) >= int(ro.Limit) { - break - } - } - return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil -} - -func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { - rev := tw.beginRev + 1 - c := rev - oldLease := lease.NoLease - - // if the key exists before, use its previous created and - // get its previous leaseID - _, created, ver, err := tw.s.kvindex.Get(key, rev) - if err == nil { - c = created.main - oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) - } - - ibytes := newRevBytes() - idxRev := revision{main: rev, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - - ver = ver + 1 - kv := mvccpb.KeyValue{ - Key: key, - Value: value, - CreateRevision: c, - ModRevision: rev, - Version: ver, - Lease: int64(leaseID), - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - tw.s.kvindex.Put(key, idxRev) - tw.changes = append(tw.changes, kv) - - if oldLease != lease.NoLease { - if tw.s.le == nil { - panic("no lessor to detach lease") - } - err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) - } - } - if leaseID != lease.NoLease { - if tw.s.le == nil { - panic("no lessor to attach lease") - } - err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - panic("unexpected error from lease Attach") - } - } -} - -func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { - rrev := tw.beginRev - if len(tw.changes) > 0 { - rrev += 1 - } - keys, revs := tw.s.kvindex.Range(key, end, rrev) - if len(keys) == 0 { - return 0 - } - for i, key := range keys { - tw.delete(key, revs[i]) - } - return int64(len(keys)) -} - -func (tw *storeTxnWrite) delete(key []byte, rev revision) { - ibytes := newRevBytes() - idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - ibytes = appendMarkTombstone(ibytes) - - kv := mvccpb.KeyValue{Key: key} - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - err = tw.s.kvindex.Tombstone(key, idxRev) - if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) - } - tw.changes = 
append(tw.changes, kv) - - item := lease.LeaseItem{Key: string(key)} - leaseID := tw.s.le.GetLease(item) - - if leaseID != lease.NoLease { - err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) - if err != nil { - plog.Errorf("cannot detach %v", err) - } - } -} - -func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go deleted file mode 100644 index a65fe59b996de..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/metrics.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "sync" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - rangeCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "range_total", - Help: "Total number of ranges seen by this member.", - }) - - putCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "put_total", - Help: "Total number of puts seen by this member.", - }) - - deleteCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "delete_total", - Help: "Total number of deletes seen by this member.", - }) - - txnCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "txn_total", - Help: "Total number of txns seen by this member.", - }) - - keysGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "keys_total", - Help: "Total number of keys.", - }) - - watchStreamGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "watch_stream_total", - Help: "Total number of watch streams.", - }) - - watcherGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "watcher_total", - Help: "Total number of watchers.", - }) - - slowWatcherGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "slow_watcher_total", - Help: "Total number of unsynced slow watchers.", - }) - - totalEventsCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "events_total", - Help: "Total number of events sent by this member.", - }) - - pendingEventsGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "pending_events_total", - Help: "Total number of pending events to be sent.", - }) - - indexCompactionPauseDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "index_compaction_pause_duration_milliseconds", - Help: "Bucketed histogram of index compaction pause duration.", - // 0.5ms 
-> 1second - Buckets: prometheus.ExponentialBuckets(0.5, 2, 12), - }) - - dbCompactionPauseDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_pause_duration_milliseconds", - Help: "Bucketed histogram of db compaction pause duration.", - // 1ms -> 4second - Buckets: prometheus.ExponentialBuckets(1, 2, 13), - }) - - dbCompactionTotalDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_total_duration_milliseconds", - Help: "Bucketed histogram of db compaction total duration.", - // 100ms -> 800second - Buckets: prometheus.ExponentialBuckets(100, 2, 14), - }) - - dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_total_size_in_bytes", - Help: "Total size of the underlying database in bytes.", - }, - func() float64 { - reportDbTotalSizeInBytesMu.RLock() - defer reportDbTotalSizeInBytesMu.RUnlock() - return reportDbTotalSizeInBytes() - }, - ) - // overridden by mvcc initialization - reportDbTotalSizeInBytesMu sync.RWMutex - reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } -) - -func init() { - prometheus.MustRegister(rangeCounter) - prometheus.MustRegister(putCounter) - prometheus.MustRegister(deleteCounter) - prometheus.MustRegister(txnCounter) - prometheus.MustRegister(keysGauge) - prometheus.MustRegister(watchStreamGauge) - prometheus.MustRegister(watcherGauge) - prometheus.MustRegister(slowWatcherGauge) - prometheus.MustRegister(totalEventsCounter) - prometheus.MustRegister(pendingEventsGauge) - prometheus.MustRegister(indexCompactionPauseDurations) - prometheus.MustRegister(dbCompactionPauseDurations) - prometheus.MustRegister(dbCompactionTotalDurations) - prometheus.MustRegister(dbTotalSize) -} - -// ReportEventReceived reports that an event is received. -// This function should be called when the external systems received an -// event from mvcc.Watcher. -func ReportEventReceived(n int) { - pendingEventsGauge.Sub(float64(n)) - totalEventsCounter.Add(float64(n)) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go deleted file mode 100644 index fd2144279aeab..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
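The histogram definitions in the metrics.go removal above annotate their bucket ranges ("0.5ms -> 1second", and so on). That follows from prometheus.ExponentialBuckets(start, factor, count), which returns count bucket bounds starting at start and multiplying by factor each step; a quick check of the first annotation, computed here without the Prometheus dependency:

package main

import "fmt"

func main() {
	// prometheus.ExponentialBuckets(0.5, 2, 12) yields 12 upper bounds:
	// 0.5, 1, 2, ..., 1024 (milliseconds), i.e. roughly 0.5ms up to ~1s.
	buckets := make([]float64, 12)
	b := 0.5
	for i := range buckets {
		buckets[i] = b
		b *= 2
	}
	fmt.Println(buckets)
}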
- -package mvcc - -import ( - "github.com/coreos/etcd/lease" -) - -type metricsTxnWrite struct { - TxnWrite - ranges uint - puts uint - deletes uint -} - -func newMetricsTxnRead(tr TxnRead) TxnRead { - return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} -} - -func newMetricsTxnWrite(tw TxnWrite) TxnWrite { - return &metricsTxnWrite{tw, 0, 0, 0} -} - -func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { - tw.ranges++ - return tw.TxnWrite.Range(key, end, ro) -} - -func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { - tw.deletes++ - return tw.TxnWrite.DeleteRange(key, end) -} - -func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw.puts++ - return tw.TxnWrite.Put(key, value, lease) -} - -func (tw *metricsTxnWrite) End() { - defer tw.TxnWrite.End() - if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { - if sum > 1 { - txnCounter.Inc() - } - return - } - switch { - case tw.ranges == 1: - rangeCounter.Inc() - case tw.puts == 1: - putCounter.Inc() - case tw.deletes == 1: - deleteCounter.Inc() - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go deleted file mode 100644 index 5fa35a1c2a26c..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/revision.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import "encoding/binary" - -// revBytesLen is the byte length of a normal revision. -// First 8 bytes is the revision.main in big-endian format. The 9th byte -// is a '_'. The last 8 bytes is the revision.sub in big-endian format. -const revBytesLen = 8 + 1 + 8 - -// A revision indicates modification of the key-value space. -// The set of changes that share same main revision changes the key-value space atomically. -type revision struct { - // main is the main revision of a set of changes that happen atomically. - main int64 - - // sub is the the sub revision of a change in a set of changes that happen - // atomically. Each change has different increasing sub revision in that - // set. 
- sub int64 -} - -func (a revision) GreaterThan(b revision) bool { - if a.main > b.main { - return true - } - if a.main < b.main { - return false - } - return a.sub > b.sub -} - -func newRevBytes() []byte { - return make([]byte, revBytesLen, markedRevBytesLen) -} - -func revToBytes(rev revision, bytes []byte) { - binary.BigEndian.PutUint64(bytes, uint64(rev.main)) - bytes[8] = '_' - binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub)) -} - -func bytesToRev(bytes []byte) revision { - return revision{ - main: int64(binary.BigEndian.Uint64(bytes[0:8])), - sub: int64(binary.BigEndian.Uint64(bytes[9:])), - } -} - -type revisions []revision - -func (a revisions) Len() int { return len(a) } -func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) } -func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go deleted file mode 100644 index 8a0df0bfcc37a..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/util.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "encoding/binary" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -func UpdateConsistentIndex(be backend.Backend, index uint64) { - tx := be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var oldi uint64 - _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) - if len(vs) != 0 { - oldi = binary.BigEndian.Uint64(vs[0]) - } - - if index <= oldi { - return - } - - bs := make([]byte, 8) - binary.BigEndian.PutUint64(bs, index) - tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) -} - -func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { - ibytes := newRevBytes() - revToBytes(revision{main: kv.ModRevision}, ibytes) - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - be.BatchTx().Lock() - be.BatchTx().UnsafePut(keyBucketName, ibytes, d) - be.BatchTx().Unlock() -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go deleted file mode 100644 index 68d9ab71d271a..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
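The revision.go file removed above encodes each revision as 17 bytes: an 8-byte big-endian main revision, a literal '_' separator, and an 8-byte big-endian sub revision, so byte-wise comparison of encoded keys matches (main, sub) ordering for non-negative revisions. A self-contained round-trip of that encoding, using the same helpers as the deleted file:

package main

import (
	"encoding/binary"
	"fmt"
)

type revision struct{ main, sub int64 }

const revBytesLen = 8 + 1 + 8

func revToBytes(rev revision, b []byte) {
	binary.BigEndian.PutUint64(b, uint64(rev.main))
	b[8] = '_'
	binary.BigEndian.PutUint64(b[9:], uint64(rev.sub))
}

func bytesToRev(b []byte) revision {
	return revision{
		main: int64(binary.BigEndian.Uint64(b[0:8])),
		sub:  int64(binary.BigEndian.Uint64(b[9:])),
	}
}

func main() {
	b := make([]byte, revBytesLen)
	revToBytes(revision{main: 42, sub: 3}, b)
	fmt.Printf("%x -> %+v\n", b, bytesToRev(b)) // round-trips to {main:42 sub:3}
}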
- -package mvcc - -import ( - "sync" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -const ( - // chanBufLen is the length of the buffered chan - // for sending out watched events. - // TODO: find a good buf value. 1024 is just a random one that - // seems to be reasonable. - chanBufLen = 1024 - - // maxWatchersPerSync is the number of watchers to sync in a single batch - maxWatchersPerSync = 512 -) - -type watchable interface { - watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) - progress(w *watcher) - rev() int64 -} - -type watchableStore struct { - *store - - // mu protects watcher groups and batches. It should never be locked - // before locking store.mu to avoid deadlock. - mu sync.RWMutex - - // victims are watcher batches that were blocked on the watch channel - victims []watcherBatch - victimc chan struct{} - - // contains all unsynced watchers that needs to sync with events that have happened - unsynced watcherGroup - - // contains all synced watchers that are in sync with the progress of the store. - // The key of the map is the key that the watcher watches on. - synced watcherGroup - - stopc chan struct{} - wg sync.WaitGroup -} - -// cancelFunc updates unsynced and synced maps when running -// cancel operations. -type cancelFunc func() - -func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV { - return newWatchableStore(b, le, ig) -} - -func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore { - s := &watchableStore{ - store: NewStore(b, le, ig), - victimc: make(chan struct{}, 1), - unsynced: newWatcherGroup(), - synced: newWatcherGroup(), - stopc: make(chan struct{}), - } - s.store.ReadView = &readView{s} - s.store.WriteView = &writeView{s} - if s.le != nil { - // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) - } - s.wg.Add(2) - go s.syncWatchersLoop() - go s.syncVictimsLoop() - return s -} - -func (s *watchableStore) Close() error { - close(s.stopc) - s.wg.Wait() - return s.store.Close() -} - -func (s *watchableStore) NewWatchStream() WatchStream { - watchStreamGauge.Inc() - return &watchStream{ - watchable: s, - ch: make(chan WatchResponse, chanBufLen), - cancels: make(map[WatchID]cancelFunc), - watchers: make(map[WatchID]*watcher), - } -} - -func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { - wa := &watcher{ - key: key, - end: end, - minRev: startRev, - id: id, - ch: ch, - fcs: fcs, - } - - s.mu.Lock() - s.revMu.RLock() - synced := startRev > s.store.currentRev || startRev == 0 - if synced { - wa.minRev = s.store.currentRev + 1 - if startRev > wa.minRev { - wa.minRev = startRev - } - } - if synced { - s.synced.add(wa) - } else { - slowWatcherGauge.Inc() - s.unsynced.add(wa) - } - s.revMu.RUnlock() - s.mu.Unlock() - - watcherGauge.Inc() - - return wa, func() { s.cancelWatcher(wa) } -} - -// cancelWatcher removes references of the watcher from the watchableStore -func (s *watchableStore) cancelWatcher(wa *watcher) { - for { - s.mu.Lock() - - if s.unsynced.delete(wa) { - slowWatcherGauge.Dec() - break - } else if s.synced.delete(wa) { - break - } else if wa.compacted { - break - } - - if !wa.victim { - panic("watcher not victim but not in watch groups") - } - - var 
victimBatch watcherBatch - for _, wb := range s.victims { - if wb[wa] != nil { - victimBatch = wb - break - } - } - if victimBatch != nil { - slowWatcherGauge.Dec() - delete(victimBatch, wa) - break - } - - // victim being processed so not accessible; retry - s.mu.Unlock() - time.Sleep(time.Millisecond) - } - - watcherGauge.Dec() - s.mu.Unlock() -} - -func (s *watchableStore) Restore(b backend.Backend) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.store.Restore(b) - if err != nil { - return err - } - - for wa := range s.synced.watchers { - s.unsynced.watchers.add(wa) - } - s.synced = newWatcherGroup() - return nil -} - -// syncWatchersLoop syncs the watcher in the unsynced map every 100ms. -func (s *watchableStore) syncWatchersLoop() { - defer s.wg.Done() - - for { - s.mu.RLock() - st := time.Now() - lastUnsyncedWatchers := s.unsynced.size() - s.mu.RUnlock() - - unsyncedWatchers := 0 - if lastUnsyncedWatchers > 0 { - unsyncedWatchers = s.syncWatchers() - } - syncDuration := time.Since(st) - - waitDuration := 100 * time.Millisecond - // more work pending? - if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers { - // be fair to other store operations by yielding time taken - waitDuration = syncDuration - } - - select { - case <-time.After(waitDuration): - case <-s.stopc: - return - } - } -} - -// syncVictimsLoop tries to write precomputed watcher responses to -// watchers that had a blocked watcher channel -func (s *watchableStore) syncVictimsLoop() { - defer s.wg.Done() - - for { - for s.moveVictims() != 0 { - // try to update all victim watchers - } - s.mu.RLock() - isEmpty := len(s.victims) == 0 - s.mu.RUnlock() - - var tickc <-chan time.Time - if !isEmpty { - tickc = time.After(10 * time.Millisecond) - } - - select { - case <-tickc: - case <-s.victimc: - case <-s.stopc: - return - } - } -} - -// moveVictims tries to update watches with already pending event data -func (s *watchableStore) moveVictims() (moved int) { - s.mu.Lock() - victims := s.victims - s.victims = nil - s.mu.Unlock() - - var newVictim watcherBatch - for _, wb := range victims { - // try to send responses again - for w, eb := range wb { - // watcher has observed the store up to, but not including, w.minRev - rev := w.minRev - 1 - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - if newVictim == nil { - newVictim = make(watcherBatch) - } - newVictim[w] = eb - continue - } - moved++ - } - - // assign completed victim watchers to unsync/sync - s.mu.Lock() - s.store.revMu.RLock() - curRev := s.store.currentRev - for w, eb := range wb { - if newVictim != nil && newVictim[w] != nil { - // couldn't send watch response; stays victim - continue - } - w.victim = false - if eb.moreRev != 0 { - w.minRev = eb.moreRev - } - if w.minRev <= curRev { - s.unsynced.add(w) - } else { - slowWatcherGauge.Dec() - s.synced.add(w) - } - } - s.store.revMu.RUnlock() - s.mu.Unlock() - } - - if len(newVictim) > 0 { - s.mu.Lock() - s.victims = append(s.victims, newVictim) - s.mu.Unlock() - } - - return moved -} - -// syncWatchers syncs unsynced watchers by: -// 1. choose a set of watchers from the unsynced watcher group -// 2. iterate over the set to get the minimum revision and remove compacted watchers -// 3. use minimum revision to get all key-value pairs and send those events to watchers -// 4. 
remove synced watchers in set from unsynced group and move to synced group -func (s *watchableStore) syncWatchers() int { - s.mu.Lock() - defer s.mu.Unlock() - - if s.unsynced.size() == 0 { - return 0 - } - - s.store.revMu.RLock() - defer s.store.revMu.RUnlock() - - // in order to find key-value pairs from unsynced watchers, we need to - // find min revision index, and these revisions can be used to - // query the backend store of key-value pairs - curRev := s.store.currentRev - compactionRev := s.store.compactMainRev - - wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) - minBytes, maxBytes := newRevBytes(), newRevBytes() - revToBytes(revision{main: minRev}, minBytes) - revToBytes(revision{main: curRev + 1}, maxBytes) - - // UnsafeRange returns keys and values. And in boltdb, keys are revisions. - // values are actual key-value pairs in backend. - tx := s.store.b.ReadTx() - tx.Lock() - revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) - evs := kvsToEvents(wg, revs, vs) - tx.Unlock() - - var victims watcherBatch - wb := newWatcherBatch(wg, evs) - for w := range wg.watchers { - w.minRev = curRev + 1 - - eb, ok := wb[w] - if !ok { - // bring un-notified watcher to synced - s.synced.add(w) - s.unsynced.delete(w) - continue - } - - if eb.moreRev != 0 { - w.minRev = eb.moreRev - } - - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - if victims == nil { - victims = make(watcherBatch) - } - w.victim = true - } - - if w.victim { - victims[w] = eb - } else { - if eb.moreRev != 0 { - // stay unsynced; more to read - continue - } - s.synced.add(w) - } - s.unsynced.delete(w) - } - s.addVictim(victims) - - vsz := 0 - for _, v := range s.victims { - vsz += len(v) - } - slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) - - return s.unsynced.size() -} - -// kvsToEvents gets all events for the watchers from all key-value pairs -func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { - for i, v := range vals { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(v); err != nil { - plog.Panicf("cannot unmarshal event: %v", err) - } - - if !wg.contains(string(kv.Key)) { - continue - } - - ty := mvccpb.PUT - if isTombstone(revs[i]) { - ty = mvccpb.DELETE - // patch in mod revision so watchers won't skip - kv.ModRevision = bytesToRev(revs[i]).main - } - evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty}) - } - return evs -} - -// notify notifies the fact that given event at the given rev just happened to -// watchers that watch on the key of the event. 
-func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { - var victim watcherBatch - for w, eb := range newWatcherBatch(&s.synced, evs) { - if eb.revs != 1 { - plog.Panicf("unexpected multiple revisions in notification") - } - - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - // move slow watcher to victims - w.minRev = rev + 1 - if victim == nil { - victim = make(watcherBatch) - } - w.victim = true - victim[w] = eb - s.synced.delete(w) - slowWatcherGauge.Inc() - } - } - s.addVictim(victim) -} - -func (s *watchableStore) addVictim(victim watcherBatch) { - if victim == nil { - return - } - s.victims = append(s.victims, victim) - select { - case s.victimc <- struct{}{}: - default: - } -} - -func (s *watchableStore) rev() int64 { return s.store.Rev() } - -func (s *watchableStore) progress(w *watcher) { - s.mu.RLock() - defer s.mu.RUnlock() - - if _, ok := s.synced.watchers[w]; ok { - w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) - // If the ch is full, this watcher is receiving events. - // We do not need to send progress at all. - } -} - -type watcher struct { - // the watcher key - key []byte - // end indicates the end of the range to watch. - // If end is set, the watcher is on a range. - end []byte - - // victim is set when ch is blocked and undergoing victim processing - victim bool - - // compacted is set when the watcher is removed because of compaction - compacted bool - - // minRev is the minimum revision update the watcher will accept - minRev int64 - id WatchID - - fcs []FilterFunc - // a chan to send out the watch response. - // The chan might be shared with other watchers. - ch chan<- WatchResponse -} - -func (w *watcher) send(wr WatchResponse) bool { - progressEvent := len(wr.Events) == 0 - - if len(w.fcs) != 0 { - ne := make([]mvccpb.Event, 0, len(wr.Events)) - for i := range wr.Events { - filtered := false - for _, filter := range w.fcs { - if filter(wr.Events[i]) { - filtered = true - break - } - } - if !filtered { - ne = append(ne, wr.Events[i]) - } - } - wr.Events = ne - } - - // if all events are filtered out, we should send nothing. - if !progressEvent && len(wr.Events) == 0 { - return true - } - select { - case w.ch <- wr: - return true - default: - return false - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go deleted file mode 100644 index 5c5bfda134137..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
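In the watcher.send method removed just above, a response's events are dropped when any of the watcher's FilterFunc callbacks returns true, and a response whose events are all filtered out is acknowledged without being sent. A reduced sketch of that filtering step, using illustrative local types (event, filterFunc) in place of the mvccpb ones:

package main

import "fmt"

type event struct{ key string }

// filterFunc reports whether an event should be filtered out, mirroring mvcc.FilterFunc.
type filterFunc func(event) bool

func filterEvents(evs []event, fcs []filterFunc) []event {
	kept := make([]event, 0, len(evs))
	for _, ev := range evs {
		filtered := false
		for _, f := range fcs {
			if f(ev) {
				filtered = true
				break
			}
		}
		if !filtered {
			kept = append(kept, ev)
		}
	}
	return kept
}

func main() {
	evs := []event{{key: "a"}, {key: "b"}}
	dropB := func(e event) bool { return e.key == "b" }
	fmt.Println(filterEvents(evs, []filterFunc{dropB})) // [{a}]
}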
- -package mvcc - -import ( - "github.com/coreos/etcd/mvcc/mvccpb" -) - -func (tw *watchableStoreTxnWrite) End() { - changes := tw.Changes() - if len(changes) == 0 { - tw.TxnWrite.End() - return - } - - rev := tw.Rev() + 1 - evs := make([]mvccpb.Event, len(changes)) - for i, change := range changes { - evs[i].Kv = &changes[i] - if change.CreateRevision == 0 { - evs[i].Type = mvccpb.DELETE - evs[i].Kv.ModRevision = rev - } else { - evs[i].Type = mvccpb.PUT - } - } - - // end write txn under watchable store lock so the updates are visible - // when asynchronous event posting checks the current store revision - tw.s.mu.Lock() - tw.s.notify(rev, evs) - tw.TxnWrite.End() - tw.s.mu.Unlock() -} - -type watchableStoreTxnWrite struct { - TxnWrite - s *watchableStore -} - -func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go deleted file mode 100644 index 9468d42695ca5..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watcher.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "errors" - "sync" - - "github.com/coreos/etcd/mvcc/mvccpb" -) - -var ( - ErrWatcherNotExist = errors.New("mvcc: watcher does not exist") -) - -type WatchID int64 - -// FilterFunc returns true if the given event should be filtered out. -type FilterFunc func(e mvccpb.Event) bool - -type WatchStream interface { - // Watch creates a watcher. The watcher watches the events happening or - // happened on the given key or range [key, end) from the given startRev. - // - // The whole event history can be watched unless compacted. - // If `startRev` <=0, watch observes events after currentRev. - // - // The returned `id` is the ID of this watcher. It appears as WatchID - // in events that are sent to the created watcher through stream channel. - // - Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID - - // Chan returns a chan. All watch response will be sent to the returned chan. - Chan() <-chan WatchResponse - - // RequestProgress requests the progress of the watcher with given ID. The response - // will only be sent if the watcher is currently synced. - // The responses will be sent through the WatchRespone Chan attached - // with this stream to ensure correct ordering. - // The responses contains no events. The revision in the response is the progress - // of the watchers since the watcher is currently synced. - RequestProgress(id WatchID) - - // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be - // returned. - Cancel(id WatchID) error - - // Close closes Chan and release all related resources. - Close() - - // Rev returns the current revision of the KV the stream watches on. - Rev() int64 -} - -type WatchResponse struct { - // WatchID is the WatchID of the watcher this response sent to. 
- WatchID WatchID - - // Events contains all the events that needs to send. - Events []mvccpb.Event - - // Revision is the revision of the KV when the watchResponse is created. - // For a normal response, the revision should be the same as the last - // modified revision inside Events. For a delayed response to a unsynced - // watcher, the revision is greater than the last modified revision - // inside Events. - Revision int64 - - // CompactRevision is set when the watcher is cancelled due to compaction. - CompactRevision int64 -} - -// watchStream contains a collection of watchers that share -// one streaming chan to send out watched events and other control events. -type watchStream struct { - watchable watchable - ch chan WatchResponse - - mu sync.Mutex // guards fields below it - // nextID is the ID pre-allocated for next new watcher in this stream - nextID WatchID - closed bool - cancels map[WatchID]cancelFunc - watchers map[WatchID]*watcher -} - -// Watch creates a new watcher in the stream and returns its WatchID. -// TODO: return error if ws is closed? -func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID { - // prevent wrong range where key >= end lexicographically - // watch request with 'WithFromKey' has empty-byte range end - if len(end) != 0 && bytes.Compare(key, end) != -1 { - return -1 - } - - ws.mu.Lock() - defer ws.mu.Unlock() - if ws.closed { - return -1 - } - - id := ws.nextID - ws.nextID++ - - w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...) - - ws.cancels[id] = c - ws.watchers[id] = w - return id -} - -func (ws *watchStream) Chan() <-chan WatchResponse { - return ws.ch -} - -func (ws *watchStream) Cancel(id WatchID) error { - ws.mu.Lock() - cancel, ok := ws.cancels[id] - ok = ok && !ws.closed - if ok { - delete(ws.cancels, id) - delete(ws.watchers, id) - } - ws.mu.Unlock() - if !ok { - return ErrWatcherNotExist - } - cancel() - return nil -} - -func (ws *watchStream) Close() { - ws.mu.Lock() - defer ws.mu.Unlock() - - for _, cancel := range ws.cancels { - cancel() - } - ws.closed = true - close(ws.ch) - watchStreamGauge.Dec() -} - -func (ws *watchStream) Rev() int64 { - ws.mu.Lock() - defer ws.mu.Unlock() - return ws.watchable.rev() -} - -func (ws *watchStream) RequestProgress(id WatchID) { - ws.mu.Lock() - w, ok := ws.watchers[id] - ws.mu.Unlock() - if !ok { - return - } - ws.watchable.progress(w) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go deleted file mode 100644 index 6ef1d0ce8bbee..0000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "math" - - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/adt" -) - -var ( - // watchBatchMaxRevs is the maximum distinct revisions that - // may be sent to an unsynced watcher at a time. 
Declared as - // var instead of const for testing purposes. - watchBatchMaxRevs = 1000 -) - -type eventBatch struct { - // evs is a batch of revision-ordered events - evs []mvccpb.Event - // revs is the minimum unique revisions observed for this batch - revs int - // moreRev is first revision with more events following this batch - moreRev int64 -} - -func (eb *eventBatch) add(ev mvccpb.Event) { - if eb.revs > watchBatchMaxRevs { - // maxed out batch size - return - } - - if len(eb.evs) == 0 { - // base case - eb.revs = 1 - eb.evs = append(eb.evs, ev) - return - } - - // revision accounting - ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision - evRev := ev.Kv.ModRevision - if evRev > ebRev { - eb.revs++ - if eb.revs > watchBatchMaxRevs { - eb.moreRev = evRev - return - } - } - - eb.evs = append(eb.evs, ev) -} - -type watcherBatch map[*watcher]*eventBatch - -func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) { - eb := wb[w] - if eb == nil { - eb = &eventBatch{} - wb[w] = eb - } - eb.add(ev) -} - -// newWatcherBatch maps watchers to their matched events. It enables quick -// events look up by watcher. -func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch { - if len(wg.watchers) == 0 { - return nil - } - - wb := make(watcherBatch) - for _, ev := range evs { - for w := range wg.watcherSetByKey(string(ev.Kv.Key)) { - if ev.Kv.ModRevision >= w.minRev { - // don't double notify - wb.add(w, ev) - } - } - } - return wb -} - -type watcherSet map[*watcher]struct{} - -func (w watcherSet) add(wa *watcher) { - if _, ok := w[wa]; ok { - panic("add watcher twice!") - } - w[wa] = struct{}{} -} - -func (w watcherSet) union(ws watcherSet) { - for wa := range ws { - w.add(wa) - } -} - -func (w watcherSet) delete(wa *watcher) { - if _, ok := w[wa]; !ok { - panic("removing missing watcher!") - } - delete(w, wa) -} - -type watcherSetByKey map[string]watcherSet - -func (w watcherSetByKey) add(wa *watcher) { - set := w[string(wa.key)] - if set == nil { - set = make(watcherSet) - w[string(wa.key)] = set - } - set.add(wa) -} - -func (w watcherSetByKey) delete(wa *watcher) bool { - k := string(wa.key) - if v, ok := w[k]; ok { - if _, ok := v[wa]; ok { - delete(v, wa) - if len(v) == 0 { - // remove the set; nothing left - delete(w, k) - } - return true - } - } - return false -} - -// watcherGroup is a collection of watchers organized by their ranges -type watcherGroup struct { - // keyWatchers has the watchers that watch on a single key - keyWatchers watcherSetByKey - // ranges has the watchers that watch a range; it is sorted by interval - ranges adt.IntervalTree - // watchers is the set of all watchers - watchers watcherSet -} - -func newWatcherGroup() watcherGroup { - return watcherGroup{ - keyWatchers: make(watcherSetByKey), - watchers: make(watcherSet), - } -} - -// add puts a watcher in the group. -func (wg *watcherGroup) add(wa *watcher) { - wg.watchers.add(wa) - if wa.end == nil { - wg.keyWatchers.add(wa) - return - } - - // interval already registered? - ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) - if iv := wg.ranges.Find(ivl); iv != nil { - iv.Val.(watcherSet).add(wa) - return - } - - // not registered, put in interval tree - ws := make(watcherSet) - ws.add(wa) - wg.ranges.Insert(ivl, ws) -} - -// contains is whether the given key has a watcher in the group. 
-func (wg *watcherGroup) contains(key string) bool { - _, ok := wg.keyWatchers[key] - return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) -} - -// size gives the number of unique watchers in the group. -func (wg *watcherGroup) size() int { return len(wg.watchers) } - -// delete removes a watcher from the group. -func (wg *watcherGroup) delete(wa *watcher) bool { - if _, ok := wg.watchers[wa]; !ok { - return false - } - wg.watchers.delete(wa) - if wa.end == nil { - wg.keyWatchers.delete(wa) - return true - } - - ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) - iv := wg.ranges.Find(ivl) - if iv == nil { - return false - } - - ws := iv.Val.(watcherSet) - delete(ws, wa) - if len(ws) == 0 { - // remove interval missing watchers - if ok := wg.ranges.Delete(ivl); !ok { - panic("could not remove watcher from interval tree") - } - } - - return true -} - -// choose selects watchers from the watcher group to update -func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) { - if len(wg.watchers) < maxWatchers { - return wg, wg.chooseAll(curRev, compactRev) - } - ret := newWatcherGroup() - for w := range wg.watchers { - if maxWatchers <= 0 { - break - } - maxWatchers-- - ret.add(w) - } - return &ret, ret.chooseAll(curRev, compactRev) -} - -func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 { - minRev := int64(math.MaxInt64) - for w := range wg.watchers { - if w.minRev > curRev { - panic("watcher current revision should not exceed current revision") - } - if w.minRev < compactRev { - select { - case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}: - w.compacted = true - wg.delete(w) - default: - // retry next time - } - continue - } - if minRev > w.minRev { - minRev = w.minRev - } - } - return minRev -} - -// watcherSetByKey gets the set of watchers that receive events on the given key. -func (wg *watcherGroup) watcherSetByKey(key string) watcherSet { - wkeys := wg.keyWatchers[key] - wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key)) - - // zero-copy cases - switch { - case len(wranges) == 0: - // no need to merge ranges or copy; reuse single-key set - return wkeys - case len(wranges) == 0 && len(wkeys) == 0: - return nil - case len(wranges) == 1 && len(wkeys) == 0: - return wranges[0].Val.(watcherSet) - } - - // copy case - ret := make(watcherSet) - ret.union(wg.keyWatchers[key]) - for _, item := range wranges { - ret.union(item.Val.(watcherSet)) - } - return ret -} diff --git a/vendor/github.com/coreos/gofail/gofail.go b/vendor/github.com/coreos/gofail/gofail.go deleted file mode 100644 index 920899f72def1..0000000000000 --- a/vendor/github.com/coreos/gofail/gofail.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// gofail is a tool for enabling/disabling failpoints in go code. 
-package main - -import ( - "fmt" - "go/build" - "io" - "os" - "path" - "path/filepath" - "strings" - - "github.com/coreos/gofail/code" -) - -type xfrmFunc func(io.Writer, io.Reader) ([]*code.Failpoint, error) - -func xfrmFile(xfrm xfrmFunc, path string) ([]*code.Failpoint, error) { - src, serr := os.Open(path) - if serr != nil { - return nil, serr - } - defer src.Close() - - dst, derr := os.OpenFile(path+".tmp", os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) - if derr != nil { - return nil, derr - } - defer dst.Close() - - fps, xerr := xfrm(dst, src) - if xerr != nil || len(fps) == 0 { - os.Remove(dst.Name()) - return nil, xerr - } - - rerr := os.Rename(dst.Name(), path) - if rerr != nil { - os.Remove(dst.Name()) - return nil, rerr - } - - return fps, nil -} - -func dir2files(dir, ext string) (ret []string, err error) { - if dir, err = filepath.Abs(dir); err != nil { - return nil, err - } - - f, ferr := os.Open(dir) - if ferr != nil { - return nil, ferr - } - defer f.Close() - - names, rerr := f.Readdirnames(0) - if rerr != nil { - return nil, rerr - } - for _, f := range names { - if path.Ext(f) != ext { - continue - } - ret = append(ret, path.Join(dir, f)) - } - return ret, nil -} - -func paths2files(paths []string) (files []string) { - // no paths => use cwd - if len(paths) == 0 { - wd, gerr := os.Getwd() - if gerr != nil { - fmt.Println(gerr) - os.Exit(1) - } - return paths2files([]string{wd}) - } - for _, p := range paths { - s, serr := os.Stat(p) - if serr != nil { - fmt.Println(serr) - os.Exit(1) - } - if s.IsDir() { - fs, err := dir2files(p, ".go") - if err != nil { - fmt.Println(err) - os.Exit(1) - } - files = append(files, fs...) - } else if path.Ext(s.Name()) == ".go" { - abs, err := filepath.Abs(p) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - files = append(files, abs) - } - } - return files -} - -func writeBinding(file string, fps []*code.Failpoint) { - if len(fps) == 0 { - return - } - fname := strings.Split(path.Base(file), ".go")[0] + ".fail.go" - out, err := os.Create(path.Join(path.Dir(file), fname)) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - // XXX: support "package main" - pkgAbsDir := path.Dir(file) - pkg := path.Base(pkgAbsDir) - pkgDir := "" - for _, srcdir := range build.Default.SrcDirs() { - if strings.HasPrefix(pkgAbsDir, srcdir) { - pkgDir = strings.Replace(pkgAbsDir, srcdir, "", 1) - break - } - } - fppath := pkg - if pkgDir == "" { - fmt.Fprintf( - os.Stderr, - "missing package for %q; using %q as failpoint path\n", - pkgAbsDir, - pkg) - } else { - fppath = pkgDir[1:] - } - code.NewBinding(pkg, fppath, fps).Write(out) - out.Close() -} - -func main() { - if len(os.Args) < 2 { - fmt.Println("not enough arguments") - os.Exit(1) - } - - var xfrm xfrmFunc - enable := false - switch os.Args[1] { - case "enable": - xfrm = code.ToFailpoints - enable = true - case "disable": - xfrm = code.ToComments - default: - fmt.Println("expected enable or disable") - os.Exit(1) - } - - files := paths2files(os.Args[2:]) - fps := [][]*code.Failpoint{} - for _, path := range files { - curfps, err := xfrmFile(xfrm, path) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - fps = append(fps, curfps) - } - - if enable { - // build runtime bindings .fail.go - for i := range files { - writeBinding(files[i], fps[i]) - } - } else { - // remove all runtime bindings - for i := range files { - fname := strings.Split(path.Base(files[i]), ".go")[0] + ".fail.go" - os.Remove(path.Join(path.Dir(files[i]), fname)) - } - } -} diff --git a/vendor/github.com/cznic/golex/doc.go 
b/vendor/github.com/cznic/golex/doc.go deleted file mode 100644 index 904601b9d711f..0000000000000 --- a/vendor/github.com/cznic/golex/doc.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright (c) 2014 The golex Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Golex is a lex/flex like (not fully POSIX lex compatible) utility. -// It renders .l formated data (http://flex.sourceforge.net/manual/Format.html#Format) to Go source code. -// The .l data can come from a file named in a command line argument. -// If no non-opt args are given, golex reads stdin. -// -// Options: -// -DFA print the DFA to stdout and quit -// -nodfaopt disable DFA optimization - don't use this for production code -// -o fname write to file `fname`, default is `lex.yy.go` -// -t write to stdout -// -v write some scanner statistics to stderr -// -32bit assume unicode rune lexer (partially implemented, disabled) -// -// To get the latest golex version: -// -// $ go get -u github.com/cznic/golex -// -// Run time library -// -// Please see http://godoc.org/github.com/cznic/golex/lex. -// -// Changelog -// -// 2014-11-18: Golex now supports %yym - a hook which can be used to mark an -// accepting state. -// -// Implementing POSIX-like longest match -// -// Consider for example this .l file: -// -// $ cat main.l -// %{ -// package main -// -// import ( -// "flag" -// "fmt" -// ) -// -// var ( -// c byte -// src string -// in []byte -// un []byte -// mark int -// ) -// -// func lex() (s string) { -// %} -// -// %yyn next() -// %yyc c -// %yym fmt.Printf("\tstate accepts: %q\n", in); mark = len(in) -// -// %% -// in = in[:0] -// mark = -1 -// -// \0 -// return "EOF" -// -// a([bcd]*z([efg]*z)?)? -// return fmt.Sprintf("match(%q)", in) -// -// %% -// if mark >= 0 { -// if len(in) > mark { -// unget(c) -// for i := len(in)-1; i >= mark; i-- { -// unget(in[i]) -// } -// next() -// } -// in = in[:mark] -// goto yyAction // Hook: Execute the semantic action of the last matched rule. -// } -// -// switch n := len(in); n { -// case 0: // [] z -// s = fmt.Sprintf("%q", c) -// next() -// case 1: // [x] z -// s = fmt.Sprintf("%q", in[0]) -// default: // [x, y, ...], z -// s = fmt.Sprintf("%q", in[0]) -// unget(c) // z -// for i := n - 1; i > 1; i-- { -// unget(in[i]) // ... -// } -// c = in[1] // y -// } -// return s -// } -// -// func next() { -// if len(un) != 0 { -// c = un[len(un)-1] -// un = un[:len(un)-1] -// return -// } -// -// in = append(in, c) -// if len(src) == 0 { -// c = 0 -// return -// } -// -// c = src[0] -// fmt.Printf("\tnext: %q\n", c) -// src = src[1:] -// } -// -// func unget(b byte) { -// un = append(un, b) -// } -// -// func main() { -// flag.Parse() -// if flag.NArg() > 0 { -// src = flag.Arg(0) -// } -// next() -// for { -// s := lex() -// fmt.Println(s) -// if s == "EOF" { -// break -// } -// } -// } -// $ -// -// Execution and output: -// -// $ golex -o main.go main.l && go run main.go abzez0abzefgxy -// next: 'a' -// next: 'b' -// state accepts: "a" -// next: 'z' -// next: 'e' -// state accepts: "abz" -// next: 'z' -// next: '0' -// state accepts: "abzez" -// match("abzez") -// next: 'a' -// '0' -// next: 'b' -// state accepts: "a" -// next: 'z' -// next: 'e' -// state accepts: "abz" -// next: 'f' -// next: 'g' -// next: 'x' -// match("abz") -// 'e' -// 'f' -// 'g' -// next: 'y' -// 'x' -// 'y' -// state accepts: "\x00" -// EOF -// $ -// -// 2014-11-15: Golex's output is now gofmt'ed, if possible. 
-// -// Missing/differing functionality of the current renderer (compared to flex): -// - No runtime tokenizer package/environment -// (but the freedom to have/write any fitting one's specific task(s)). -// - The generated FSM picks the rules in the order of their appearance in the .l source, -// but "flex picks the rule that matches the most text". -// - And probably more. -// Further limitations on the .l source are listed in the cznic/lex package godocs. -// -// A simple golex program example (make example1 && ./example1): -// -// %{ -// package main -// -// import ( -// "bufio" -// "fmt" -// "log" -// "os" -// ) -// -// var ( -// src = bufio.NewReader(os.Stdin) -// buf []byte -// current byte -// ) -// -// func getc() byte { -// if current != 0 { -// buf = append(buf, current) -// } -// current = 0 -// if b, err := src.ReadByte(); err == nil { -// current = b -// } -// return current -// } -// -// // %yyc is a "macro" to access the "current" character. -// // -// // %yyn is a "macro" to move to the "next" character. -// // -// // %yyb is a "macro" to return the begining-of-line status (a bool typed value). -// // It is used for patterns like `^re`. -// // Example: %yyb prev == 0 || prev == '\n' -// // -// // %yyt is a "macro" to return the top/current start condition (an int typed value). -// // It is used when there are patterns with conditions like `re`. -// // Example: %yyt startCond -// -// func main() { // This left brace is closed by *1 -// c := getc() // init -// %} -// -// %yyc c -// %yyn c = getc() -// -// D [0-9]+ -// -// %% -// buf = buf[:0] // Code before the first rule is executed before every scan cycle (state 0 action) -// -// [ \t\n\r]+ // Ignore whitespace -// -// {D} fmt.Printf("int %q\n", buf) -// -// {D}\.{D}?|\.{D} fmt.Printf("float %q\n", buf) -// -// \0 return // Exit on EOF or any other error -// -// . fmt.Printf("%q\n", buf) // Printout any other unrecognized stuff -// -// %% -// // The rendered scanner enters top of the user code section when -// // lexem recongition fails. In this example it should never happen. -// log.Fatal("scanner internal error") -// -// } // *1 this right brace -package main diff --git a/vendor/github.com/cznic/golex/main.go b/vendor/github.com/cznic/golex/main.go deleted file mode 100644 index ae5973d7e1b30..0000000000000 --- a/vendor/github.com/cznic/golex/main.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2014 The golex Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "log" - "os" - - "github.com/cznic/lex" -) - -const ( - OFILE = "lex.yy.go" -) - -var ( - stdin = bufio.NewReader(os.Stdin) - stdout = bufio.NewWriter(os.Stdout) - stderr = bufio.NewWriter(os.Stderr) -) - -type renderer interface { - render(srcname string, l *lex.L) -} - -type writer interface { - io.Writer - wprintf(s string, args ...interface{}) (n int, err error) -} - -type noRender struct { - w io.Writer -} - -func (r *noRender) Write(p []byte) (n int, err error) { - return r.w.Write(p) -} - -func (r *noRender) wprintf(s string, args ...interface{}) (n int, err error) { - n, err = io.WriteString(r.w, fmt.Sprintf(s, args...)) - if err != nil { - log.Fatal(err) - } - - return -} - -func q(c uint32) string { - switch c { - default: - s := fmt.Sprintf("%q", string(c)) - return "'" + s[1:len(s)-1] + "'" - case '\'': - return "'\\''" - case '"': - return "'\"'" - } - - panic("unreachable") -} - -func main() { - log.SetFlags(log.Flags() | log.Lshortfile) - oflag := "" - var dfaflag, hflag, tflag, vflag, nodfaopt, bits32 bool - - flag.BoolVar(&dfaflag, "DFA", false, "write DFA on stdout and quit") - flag.BoolVar(&hflag, "h", false, "show help and exit") - flag.StringVar(&oflag, "o", OFILE, "lexer output") - flag.BoolVar(&tflag, "t", false, "write scanner on stdout instead of "+OFILE) - flag.BoolVar(&vflag, "v", false, "write summary of scanner statistics to stderr") - flag.BoolVar(&nodfaopt, "nodfaopt", false, "disable DFA optimization - don't use this for production code") - //flag.BoolVar(&bits32, "32bit", false, "assume unicode rune lexer (partially implemented)") - flag.Parse() - if hflag || flag.NArg() > 1 { - flag.Usage() - fmt.Fprintf(stderr, "\n%s [-o out_name] [other_options] [in_name]\n", os.Args[0]) - fmt.Fprintln(stderr, " If no in_name is given then read from stdin.") - stderr.Flush() - os.Exit(1) - } - - var ( - lfile *bufio.Reader // source .l - gofile *bufio.Writer // dest .go - ) - - lname := flag.Arg(0) - if lname == "" { - lfile = stdin - } else { - l, err := os.Open(lname) - if err != nil { - log.Fatal(err) - } - - defer l.Close() - lfile = bufio.NewReader(l) - } - - l, err := lex.NewL(lname, lfile, nodfaopt, bits32) - if err != nil { - log.Fatal(err) - } - - if dfaflag { - fmt.Println(l.DfaString()) - os.Exit(1) - } - - if tflag { - gofile = stdout - } else { - if oflag == "" { - oflag = OFILE - } - g, err := os.Create(oflag) - if err != nil { - log.Fatal(err) - } - - defer g.Close() - gofile = bufio.NewWriter(g) - } - defer gofile.Flush() - var buf bytes.Buffer - renderGo{noRender{&buf}, map[int]bool{}}.render(lname, l) - dst, err := format.Source(buf.Bytes()) - switch { - case err != nil: - fmt.Fprintf(os.Stderr, "%v\n", err) - if _, err := gofile.Write(buf.Bytes()); err != nil { - log.Fatal(err) - } - default: - if _, err := gofile.Write(dst); err != nil { - log.Fatal(err) - } - } - - if vflag { - fmt.Fprintln(os.Stderr, l.String()) - } -} diff --git a/vendor/github.com/cznic/golex/render.go b/vendor/github.com/cznic/golex/render.go deleted file mode 100644 index e9ed490a213fc..0000000000000 --- a/vendor/github.com/cznic/golex/render.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright (c) 2014 The golex Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "fmt" - "github.com/cznic/lex" - "github.com/cznic/lexer" - "log" - "sort" - "strings" -) - -type renderGo struct { - noRender - scStates map[int]bool -} - -func (r *renderGo) prolog(l *lex.L) { - for _, state := range l.StartConditionsStates { - r.scStates[int(state.Index)] = true - } - for _, state := range l.StartConditionsBolStates { - r.scStates[int(state.Index)] = true - } - r.w.Write([]byte("// CAUTION: Generated file - DO NOT EDIT.\n\n")) - for _, line := range l.DefCode { - r.w.Write([]byte(line)) - } - r.wprintf("\nyystate0:\n") - if l.YYM != "yym" { - r.wprintf("yyrule := -1\n_ = yyrule") - } - if action0 := l.Rules[0].Action; action0 != "" { - r.w.Write([]byte(action0)) - } - scNames := map[int]string{} - for name, i := range l.StartConditions { - scNames[i] = name - } - if len(l.StartConditionsStates) > 1 || len(l.StartConditionsBolStates) != 0 { - if len(l.StartConditionsBolStates) == 0 { - r.wprintf("\n\nswitch yyt := %s; yyt {\n", l.YYT) - } else { - r.wprintf("\n\nswitch yyt, yyb := %s, %s; yyt {\n", l.YYT, l.YYB) - } - r.wprintf("default:\npanic(fmt.Errorf(`invalid start condition %%d`, yyt))\n") - - // Stabilize map ranging - x := []int{} - for sc := range l.StartConditionsStates { - x = append(x, sc) - } - sort.Ints(x) - - for _, sc := range x { - state := l.StartConditionsStates[sc] - r.wprintf("case %d: // start condition: %s\n", sc, scNames[sc]) - if state, ok := l.StartConditionsBolStates[sc]; ok { - r.wprintf("if yyb { goto yystart%d }\n", state.Index) - } - r.wprintf("goto yystart%d\n", state.Index) - } - r.wprintf("}\n\n") - } else { - r.wprintf("\n\ngoto yystart%d\n\n", l.StartConditionsStates[0].Index) - } -} - -func isReturn(code string) bool { - const ret = "return" - lenret := len(ret) - lines := strings.Split(code, "\n") - for { - l := len(lines) - if l == 0 { - break - } - - line := strings.TrimSpace(lines[l-1]) - if line == "" { - lines = lines[:l-1] - continue - } - - if len(line) >= lenret && line[:lenret] == ret { - if len(line) == lenret { - return true - } - - if c := line[lenret]; c == ' ' || c == '\t' { - return true - } - } - - break - - } - return false -} - -func (r *renderGo) rules(l *lex.L) { - for i := 1; i < len(l.Rules); i++ { - rule := l.Rules[i] - r.wprintf("yyrule%d: // %s\n", i, rule.Pattern) - act := strings.TrimSpace(rule.Action) - if act != "" && act != "|" { - r.wprintf("{\n") - r.w.Write([]byte(rule.Action)) - } - if act != "|" { - r.wprintf("\n") - if !isReturn(rule.Action) { - r.wprintf("goto yystate0\n") - } - } - if act != "" && act != "|" { - r.wprintf("}\n") - } - } - r.wprintf(`panic("unreachable")` + "\n") -} - -func (r *renderGo) scanFail(l *lex.L) { - r.wprintf("\ngoto yyabort // silence unused label error\n") - r.wprintf("\nyyabort: // no lexem recognized\n") -} - -func (r *renderGo) userCode(l *lex.L) { - if userCode := l.UserCode; userCode != "" { - r.w.Write([]byte(userCode)) - } -} - -func (r *renderGo) defaultTransition(l *lex.L, state *lexer.NfaState) (defaultEdge *lexer.RangesEdge) { - r.wprintf("default:\n") - if rule, ok := l.Accepts[state]; ok { - r.wprintf("goto yyrule%d\n", rule) - return - } - - cases := map[rune]bool{} - for i := 0; i < 256; i++ { - cases[rune(i)] = true - } - for _, edge0 := range state.Consuming { - switch edge := edge0.(type) { - default: - log.Fatalf("unexpected type %T", edge0) - case *lexer.RuneEdge: - delete(cases, edge.Rune) - case *lexer.RangesEdge: - if defaultEdge == nil || len(edge.Ranges.R32) > len(defaultEdge.Ranges.R32) { - defaultEdge = edge - } 
- for _, rng := range edge.Ranges.R32 { - for c := rng.Lo; c <= rng.Hi; c += rng.Stride { - delete(cases, rune(c)) - } - } - } - } - if len(cases) != 0 { - r.wprintf("goto yyabort\n") - return nil - } - - if defaultEdge != nil { - r.wprintf("goto yystate%d // %s\n", defaultEdge.Target().Index, r.rangesEdgeString(defaultEdge, l)) - return - } - - panic("internal error") -} - -func (r *renderGo) rangesEdgeString(edge *lexer.RangesEdge, l *lex.L) string { - a := []string{} - for _, rng := range edge.Ranges.R32 { - if rng.Stride != 1 { - panic("internal error") - } - - if rng.Hi-rng.Lo == 1 { - a = append(a, fmt.Sprintf("%s == %s || %s == %s", l.YYC, q(rng.Lo), l.YYC, q(rng.Hi))) - continue - } - - if rng.Hi-rng.Lo > 0 { - a = append(a, fmt.Sprintf("%s >= %s && %s <= %s", l.YYC, q(rng.Lo), l.YYC, q(rng.Hi))) - continue - } - - // rng.Hi == rng.Lo - a = append(a, fmt.Sprintf("%s == %s", l.YYC, q(rng.Lo))) - } - return strings.Replace(strings.Join(a, " || "), "%", "%%", -1) -} - -func (r *renderGo) transitions(l *lex.L, state *lexer.NfaState) { - r.wprintf("switch {\n") - var defaultEdge lexer.Edger = r.defaultTransition(l, state) - - // Stabilize case order - a := []string{} - m := map[string]uint{} - for _, edge0 := range state.Consuming { - if edge0 == defaultEdge { - continue - } - - s := "" - switch edge := edge0.(type) { - default: - log.Fatalf("unexpected type %T", edge0) - case *lexer.RuneEdge: - s = fmt.Sprintf("%s == %s", l.YYC, q(uint32(edge.Rune))) - case *lexer.RangesEdge: - s = fmt.Sprintf(r.rangesEdgeString(edge, l)) - } - a = append(a, s) - m[s] = edge0.Target().Index - } - sort.Strings(a) - for _, s := range a { - r.wprintf("case %s:\ngoto yystate%d\n", s, m[s]) - } - - r.wprintf("}\n\n") -} - -func (r *renderGo) states(l *lex.L) { - yym := l.YYM != "yym" - r.wprintf("goto yystate%d // silence unused label error\n", 0) - if yym { - r.wprintf("goto yyAction // silence unused label error\n") - r.wprintf("yyAction:\n") - r.wprintf("switch yyrule {\n") - for i := range l.Rules[1:] { - r.wprintf("case %d:\ngoto yyrule%d\n", i+1, i+1) - } - r.wprintf("}\n") - } - for _, state := range l.Dfa { - iState := int(state.Index) - if _, ok := r.scStates[iState]; ok { - r.wprintf("goto yystate%d // silence unused label error\n", iState) - } - r.wprintf("yystate%d:\n", iState) - rule, ok := l.Accepts[state] - if !ok || !l.Rules[rule].EOL { - r.wprintf("%s\n", l.YYN) - } - if ok && l.YYM != "yym" { - r.wprintf("yyrule = %d\n", rule) - r.wprintf("%s\n", l.YYM) - } - if _, ok := r.scStates[iState]; ok { - r.wprintf("yystart%d:\n", iState) - } - if len(state.Consuming) != 0 { - r.transitions(l, state) - } else { - if rule, ok := l.Accepts[state]; ok { - r.wprintf("goto yyrule%d\n\n", rule) - } else { - panic("internal error") - } - } - } -} - -func (r renderGo) render(srcname string, l *lex.L) { - r.prolog(l) - r.states(l) - r.rules(l) - r.scanFail(l) - r.userCode(l) -} diff --git a/vendor/github.com/cznic/mathutil/mersenne/LICENSE b/vendor/github.com/cznic/mathutil/mersenne/LICENSE new file mode 100644 index 0000000000000..4fa2a1f4fdba0 --- /dev/null +++ b/vendor/github.com/cznic/mathutil/mersenne/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 The mersenne Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the names of the authors nor the names of the +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cznic/parser/nquads/LICENSE b/vendor/github.com/cznic/parser/nquads/LICENSE new file mode 100644 index 0000000000000..f007b7fa8ff3a --- /dev/null +++ b/vendor/github.com/cznic/parser/nquads/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 The parser Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the names of the authors nor the names of the +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go deleted file mode 100644 index 0d6055d610e3a..0000000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go +++ /dev/null @@ -1,51 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* - A plugin for the Google protocol buffer compiler to generate Go code. - Run it by building this program and putting it in your path with the name - protoc-gen-go - That word 'go' at the end becomes part of the option string set for the - protocol compiler, so once the protocol compiler (protoc) is installed - you can run - protoc --go_out=output_directory input_directory/file.proto - to generate Go bindings for the protocol defined by file.proto. - With that input, the output will be written to - output_directory/file.pb.go - - The generated code is documented in the package comment for - the library. - - See the README and documentation for protocol buffers to learn more: - https://developers.google.com/protocol-buffers/ - -*/ -package documentation diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go deleted file mode 100644 index 532a550050ee3..0000000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go +++ /dev/null @@ -1,34 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package main - -import _ "github.com/golang/protobuf/protoc-gen-go/grpc" diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go deleted file mode 100644 index 8e2486de0b2e2..0000000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go +++ /dev/null @@ -1,98 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate -// Go code. Run it by building this program and putting it in your path with -// the name -// protoc-gen-go -// That word 'go' at the end becomes part of the option string set for the -// protocol compiler, so once the protocol compiler (protoc) is installed -// you can run -// protoc --go_out=output_directory input_directory/file.proto -// to generate Go bindings for the protocol defined by file.proto. -// With that input, the output will be written to -// output_directory/file.pb.go -// -// The generated code is documented in the package comment for -// the library. 
-// -// See the README and documentation for protocol buffers to learn more: -// https://developers.google.com/protocol-buffers/ -package main - -import ( - "io/ioutil" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/generator" -) - -func main() { - // Begin by allocating a generator. The request and response structures are stored there - // so we can do error handling easily - the response structure contains the field to - // report failure. - g := generator.New() - - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - g.Error(err, "reading input") - } - - if err := proto.Unmarshal(data, g.Request); err != nil { - g.Error(err, "parsing input proto") - } - - if len(g.Request.FileToGenerate) == 0 { - g.Fail("no files to generate") - } - - g.CommandLineParameters(g.Request.GetParameter()) - - // Create a wrapped version of the Descriptors and EnumDescriptors that - // point to the file that defines them. - g.WrapTypes() - - g.SetPackageNames() - g.BuildTypeNameMap() - - g.GenerateAllFiles() - - // Send back the results. - data, err = proto.Marshal(g.Response) - if err != nil { - g.Error(err, "failed to marshal output proto") - } - _, err = os.Stdout.Write(data) - if err != nil { - g.Error(err, "failed to write output proto") - } -} diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE new file mode 100644 index 0000000000000..11069edd79019 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/golang.org/x/text/doc.go b/vendor/golang.org/x/text/doc.go deleted file mode 100644 index a48e2843fcd07..0000000000000 --- a/vendor/golang.org/x/text/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// text is a repository of text-related packages related to internationalization -// (i18n) and localization (l10n), such as character encodings, text -// transformations, and locale-specific text handling. -package text - -// TODO: more documentation on general concepts, such as Transformers, use -// of normalization, etc. diff --git a/vendor/golang.org/x/text/gen.go b/vendor/golang.org/x/text/gen.go deleted file mode 100644 index 79af97e708b14..0000000000000 --- a/vendor/golang.org/x/text/gen.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// gen runs go generate on Unicode- and CLDR-related package in the text -// repositories, taking into account dependencies and versions. -package main - -import ( - "bytes" - "flag" - "fmt" - "go/build" - "go/format" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "runtime" - "strings" - "sync" - "unicode" - - "golang.org/x/text/internal/gen" -) - -var ( - verbose = flag.Bool("v", false, "verbose output") - force = flag.Bool("force", false, "ignore failing dependencies") - doCore = flag.Bool("core", false, "force an update to core") - excludeList = flag.String("exclude", "", - "comma-separated list of packages to exclude") - - // The user can specify a selection of packages to build on the command line. - args []string -) - -func exclude(pkg string) bool { - if len(args) > 0 { - return !contains(args, pkg) - } - return contains(strings.Split(*excludeList, ","), pkg) -} - -// TODO: -// - Better version handling. -// - Generate tables for the core unicode package? -// - Add generation for encodings. This requires some retooling here and there. -// - Running repo-wide "long" tests. - -var vprintf = fmt.Printf - -func main() { - gen.Init() - args = flag.Args() - if !*verbose { - // Set vprintf to a no-op. - vprintf = func(string, ...interface{}) (int, error) { return 0, nil } - } - - // TODO: create temporary cache directory to load files and create and set - // a "cache" option if the user did not specify the UNICODE_DIR environment - // variable. This will prevent duplicate downloads and also will enable long - // tests, which really need to be run after each generated package. - - updateCore := *doCore - if gen.UnicodeVersion() != unicode.Version { - fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n", - gen.UnicodeVersion(), - unicode.Version) - // TODO: use collate to compare. Simple comparison will work, though, - // until Unicode reaches version 10. To avoid circular dependencies, we - // could use the NumericWeighter without using package collate using a - // trivial Weighter implementation. - if gen.UnicodeVersion() < unicode.Version && !*force { - os.Exit(2) - } - updateCore = true - } - - var unicode = &dependency{} - if updateCore { - fmt.Printf("Updating core to version %s...\n", gen.UnicodeVersion()) - unicode = generate("unicode") - - // Test some users of the unicode packages, especially the ones that - // keep a mirrored table. These may need to be corrected by hand. 
- generate("regexp", unicode) - generate("strconv", unicode) // mimics Unicode table - generate("strings", unicode) - generate("testing", unicode) // mimics Unicode table - } - - var ( - cldr = generate("./unicode/cldr", unicode) - language = generate("./language", cldr) - internal = generate("./internal", unicode, language) - norm = generate("./unicode/norm", unicode) - rangetable = generate("./unicode/rangetable", unicode) - cases = generate("./cases", unicode, norm, language, rangetable) - width = generate("./width", unicode) - bidi = generate("./unicode/bidi", unicode, norm, rangetable) - mib = generate("./encoding/internal/identifier", unicode) - _ = generate("./encoding/htmlindex", unicode, language, mib) - _ = generate("./encoding/ianaindex", unicode, language, mib) - _ = generate("./secure/precis", unicode, norm, rangetable, cases, width, bidi) - _ = generate("./currency", unicode, cldr, language, internal) - _ = generate("./internal/number", unicode, cldr, language, internal) - _ = generate("./feature/plural", unicode, cldr, language, internal) - _ = generate("./internal/export/idna", unicode, bidi, norm) - _ = generate("./language/display", unicode, cldr, language, internal) - _ = generate("./collate", unicode, norm, cldr, language, rangetable) - _ = generate("./search", unicode, norm, cldr, language, rangetable) - ) - all.Wait() - - // Copy exported packages to the destination golang.org repo. - copyExported("golang.org/x/net/idna") - - if updateCore { - copyVendored() - } - - if hasErrors { - fmt.Println("FAIL") - os.Exit(1) - } - vprintf("SUCCESS\n") -} - -var ( - all sync.WaitGroup - hasErrors bool -) - -type dependency struct { - sync.WaitGroup - hasErrors bool -} - -func generate(pkg string, deps ...*dependency) *dependency { - var wg dependency - if exclude(pkg) { - return &wg - } - wg.Add(1) - all.Add(1) - go func() { - defer wg.Done() - defer all.Done() - // Wait for dependencies to finish. - for _, d := range deps { - d.Wait() - if d.hasErrors && !*force { - fmt.Printf("--- ABORT: %s\n", pkg) - wg.hasErrors = true - return - } - } - vprintf("=== GENERATE %s\n", pkg) - args := []string{"generate"} - if *verbose { - args = append(args, "-v") - } - args = append(args, pkg) - cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...) - w := &bytes.Buffer{} - cmd.Stderr = w - cmd.Stdout = w - if err := cmd.Run(); err != nil { - fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err) - hasErrors = true - wg.hasErrors = true - return - } - - vprintf("=== TEST %s\n", pkg) - args[0] = "test" - cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...) - wt := &bytes.Buffer{} - cmd.Stderr = wt - cmd.Stdout = wt - if err := cmd.Run(); err != nil { - fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err) - hasErrors = true - wg.hasErrors = true - return - } - vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w)) - fmt.Print(wt.String()) - }() - return &wg -} - -// copyExported copies a package in x/text/internal/export to the -// destination repository. -func copyExported(p string) { - copyPackage( - filepath.Join("internal", "export", path.Base(p)), - filepath.Join("..", filepath.FromSlash(p[len("golang.org/x"):])), - "golang.org/x/text/internal/export/"+path.Base(p), - p) -} - -// copyVendored copies packages used by Go core into the vendored directory. 
-func copyVendored() { - root := filepath.Join(build.Default.GOROOT, filepath.FromSlash("src/vendor/golang_org/x")) - - err := filepath.Walk(root, func(dir string, info os.FileInfo, err error) error { - if err != nil || !info.IsDir() || root == dir { - return err - } - src := dir[len(root)+1:] - const slash = string(filepath.Separator) - if c := strings.Split(src, slash); c[0] == "text" { - // Copy a text repo package from its normal location. - src = strings.Join(c[1:], slash) - } else { - // Copy the vendored package if it exists in the export directory. - src = filepath.Join("internal", "export", filepath.Base(src)) - } - copyPackage(src, dir, "golang.org", "golang_org") - return nil - }) - if err != nil { - fmt.Printf("Seeding directory %s has failed %v:", root, err) - os.Exit(1) - } -} - -// goGenRE is used to remove go:generate lines. -var goGenRE = regexp.MustCompile("//go:generate[^\n]*\n") - -// copyPackage copies relevant files from a directory in x/text to the -// destination package directory. The destination package is assumed to have -// the same name. For each copied file go:generate lines are removed and -// and package comments are rewritten to the new path. -func copyPackage(dirSrc, dirDst, search, replace string) { - err := filepath.Walk(dirSrc, func(file string, info os.FileInfo, err error) error { - base := filepath.Base(file) - if err != nil || info.IsDir() || - !strings.HasSuffix(base, ".go") || - strings.HasSuffix(base, "_test.go") && !strings.HasPrefix(base, "example") || - // Don't process subdirectories. - filepath.Dir(file) != dirSrc { - return nil - } - b, err := ioutil.ReadFile(file) - if err != nil || bytes.Contains(b, []byte("\n// +build ignore")) { - return err - } - // Fix paths. - b = bytes.Replace(b, []byte(search), []byte(replace), -1) - // Remove go:generate lines. - b = goGenRE.ReplaceAllLiteral(b, nil) - comment := "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n" - if *doCore { - comment = "// Code generated by running \"go run gen.go -core\" in golang.org/x/text. DO NOT EDIT.\n\n" - } - if !bytes.HasPrefix(b, []byte(comment)) { - b = append([]byte(comment), b...) - } - if b, err = format.Source(b); err != nil { - fmt.Println("Failed to format file:", err) - os.Exit(1) - } - file = filepath.Join(dirDst, base) - vprintf("=== COPY %s\n", file) - return ioutil.WriteFile(file, b, 0666) - }) - if err != nil { - fmt.Println("Copying exported files failed:", err) - os.Exit(1) - } -} - -func contains(a []string, s string) bool { - for _, e := range a { - if s == e { - return true - } - } - return false -} - -func indent(b *bytes.Buffer) string { - return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1) -} diff --git a/vendor/golang.org/x/text/internal/gen.go b/vendor/golang.org/x/text/internal/gen.go deleted file mode 100644 index 1d678af574059..0000000000000 --- a/vendor/golang.org/x/text/internal/gen.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/language" - "golang.org/x/text/unicode/cldr" -) - -func main() { - r := gen.OpenCLDRCoreZip() - defer r.Close() - - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatalf("DecodeZip: %v", err) - } - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "internal") - - // Create parents table. - parents := make([]uint16, language.NumCompactTags) - for _, loc := range data.Locales() { - tag := language.MustParse(loc) - index, ok := language.CompactIndex(tag) - if !ok { - continue - } - parentIndex := 0 // und - for p := tag.Parent(); p != language.Und; p = p.Parent() { - if x, ok := language.CompactIndex(p); ok { - parentIndex = x - break - } - } - parents[index] = uint16(parentIndex) - } - - w.WriteComment(` - Parent maps a compact index of a tag to the compact index of the parent of - this tag.`) - w.WriteVar("Parent", parents) -} diff --git a/vendor/golang.org/x/text/internal/internal.go b/vendor/golang.org/x/text/internal/internal.go deleted file mode 100644 index eac832850daad..0000000000000 --- a/vendor/golang.org/x/text/internal/internal.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// Package internal contains non-exported functionality that are used by -// packages in the text repository. -package internal // import "golang.org/x/text/internal" - -import ( - "sort" - - "golang.org/x/text/language" -) - -// SortTags sorts tags in place. -func SortTags(tags []language.Tag) { - sort.Sort(sorter(tags)) -} - -type sorter []language.Tag - -func (s sorter) Len() int { - return len(s) -} - -func (s sorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s sorter) Less(i, j int) bool { - return s[i].String() < s[j].String() -} - -// UniqueTags sorts and filters duplicate tags in place and returns a slice with -// only unique tags. -func UniqueTags(tags []language.Tag) []language.Tag { - if len(tags) <= 1 { - return tags - } - SortTags(tags) - k := 0 - for i := 1; i < len(tags); i++ { - if tags[k].String() < tags[i].String() { - k++ - tags[k] = tags[i] - } - } - return tags[:k+1] -} diff --git a/vendor/golang.org/x/text/internal/match.go b/vendor/golang.org/x/text/internal/match.go deleted file mode 100644 index a67fcaca13f29..0000000000000 --- a/vendor/golang.org/x/text/internal/match.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// This file contains matchers that implement CLDR inheritance. -// -// See http://unicode.org/reports/tr35/#Locale_Inheritance. -// -// Some of the inheritance described in this document is already handled by -// the cldr package. - -import ( - "golang.org/x/text/language" -) - -// TODO: consider if (some of the) matching algorithm needs to be public after -// getting some feel about what is generic and what is specific. - -// NewInheritanceMatcher returns a matcher that matches based on the inheritance -// chain. -// -// The matcher uses canonicalization and the parent relationship to find a -// match. The resulting match will always be either Und or a language with the -// same language and script as the requested language. 
It will not match -// languages for which there is understood to be mutual or one-directional -// intelligibility. -// -// A Match will indicate an Exact match if the language matches after -// canonicalization and High if the matched tag is a parent. -func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher { - tags := &InheritanceMatcher{make(map[language.Tag]int)} - for i, tag := range t { - ct, err := language.All.Canonicalize(tag) - if err != nil { - ct = tag - } - tags.index[ct] = i - } - return tags -} - -type InheritanceMatcher struct { - index map[language.Tag]int -} - -func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) { - for _, t := range want { - ct, err := language.All.Canonicalize(t) - if err != nil { - ct = t - } - conf := language.Exact - for { - if index, ok := m.index[ct]; ok { - return ct, index, conf - } - if ct == language.Und { - break - } - ct = ct.Parent() - conf = language.High - } - } - return language.Und, 0, language.No -} diff --git a/vendor/golang.org/x/text/internal/tables.go b/vendor/golang.org/x/text/internal/tables.go deleted file mode 100644 index 7fb15f6b85b35..0000000000000 --- a/vendor/golang.org/x/text/internal/tables.go +++ /dev/null @@ -1,116 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package internal - -// Parent maps a compact index of a tag to the compact index of the parent of -// this tag. -var Parent = []uint16{ // 752 elements - // Entry 0 - 3F - 0x0000, 0x0053, 0x00e5, 0x0000, 0x0003, 0x0003, 0x0000, 0x0006, - 0x0000, 0x0008, 0x0000, 0x000a, 0x0000, 0x000c, 0x000c, 0x000c, - 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, - 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, - 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, - 0x000c, 0x0000, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x002e, - 0x0000, 0x0000, 0x0031, 0x0030, 0x0030, 0x0000, 0x0035, 0x0000, - 0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x003d, 0x0000, - // Entry 40 - 7F - 0x0000, 0x0040, 0x0000, 0x0042, 0x0042, 0x0000, 0x0045, 0x0045, - 0x0000, 0x0048, 0x0000, 0x004a, 0x0000, 0x0000, 0x004d, 0x004c, - 0x004c, 0x0000, 0x0051, 0x0051, 0x0051, 0x0051, 0x0000, 0x0056, - 0x0000, 0x0058, 0x0000, 0x005a, 0x0000, 0x005c, 0x005c, 0x0000, - 0x005f, 0x0000, 0x0061, 0x0000, 0x0063, 0x0000, 0x0065, 0x0065, - 0x0000, 0x0068, 0x0000, 0x006a, 0x006a, 0x006a, 0x006a, 0x006a, - 0x006a, 0x006a, 0x0000, 0x0072, 0x0000, 0x0074, 0x0000, 0x0076, - 0x0000, 0x0000, 0x0079, 0x0000, 0x007b, 0x0000, 0x007d, 0x0000, - // Entry 80 - BF - 0x007f, 0x007f, 0x0000, 0x0082, 0x0082, 0x0000, 0x0085, 0x0086, - 0x0086, 0x0086, 0x0085, 0x0087, 0x0086, 0x0086, 0x0086, 0x0085, - 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, - 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0087, 0x0086, 0x0086, - 0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, - 0x0086, 0x0086, 0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, - 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, - 0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0086, 0x0085, 0x0086, - // Entry C0 - FF - 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, - 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0085, - 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0086, - 0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, - 0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0085, 0x0086, 0x0086, - 0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0000, 0x00ee, 
- 0x0000, 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, - 0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f0, 0x00f0, 0x00f1, 0x00f1, - // Entry 100 - 13F - 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f1, - 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x0000, 0x010c, 0x0000, 0x010e, - 0x0000, 0x0110, 0x0000, 0x0112, 0x0112, 0x0000, 0x0115, 0x0115, - 0x0115, 0x0115, 0x0000, 0x011a, 0x0000, 0x011c, 0x0000, 0x011e, - 0x011e, 0x0000, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, - 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, - 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, - 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, - // Entry 140 - 17F - 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, - 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, - 0x0000, 0x0150, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156, - 0x0000, 0x0158, 0x0000, 0x015a, 0x015a, 0x015a, 0x0000, 0x015e, - 0x0000, 0x0000, 0x0161, 0x0000, 0x0163, 0x0000, 0x0165, 0x0165, - 0x0165, 0x0000, 0x0169, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000, - 0x016f, 0x016f, 0x0000, 0x0172, 0x0000, 0x0174, 0x0000, 0x0176, - 0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e, - // Entry 180 - 1BF - 0x0000, 0x0180, 0x0180, 0x0180, 0x0000, 0x0000, 0x0185, 0x0000, - 0x0000, 0x0188, 0x0000, 0x018a, 0x0000, 0x0000, 0x018d, 0x0000, - 0x018f, 0x0000, 0x0000, 0x0192, 0x0000, 0x0000, 0x0195, 0x0000, - 0x0197, 0x0000, 0x0199, 0x0000, 0x019b, 0x0000, 0x019d, 0x0000, - 0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000, - 0x01a7, 0x0000, 0x01a9, 0x01a9, 0x0000, 0x01ac, 0x0000, 0x01ae, - 0x0000, 0x01b0, 0x0000, 0x01b2, 0x0000, 0x01b4, 0x0000, 0x0000, - 0x01b7, 0x0000, 0x01b9, 0x0000, 0x01bb, 0x0000, 0x01bd, 0x0000, - // Entry 1C0 - 1FF - 0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x01c3, 0x01c3, 0x01c3, - 0x0000, 0x01c8, 0x0000, 0x01ca, 0x01ca, 0x0000, 0x01cd, 0x0000, - 0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000, - 0x01d7, 0x01d7, 0x0000, 0x01da, 0x0000, 0x01dc, 0x0000, 0x01de, - 0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6, - 0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x01ec, 0x01ec, - 0x0000, 0x01f0, 0x0000, 0x01f2, 0x0000, 0x01f4, 0x0000, 0x01f6, - 0x0000, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x01fb, 0x0000, 0x01fe, - // Entry 200 - 23F - 0x0000, 0x0200, 0x0200, 0x0000, 0x0203, 0x0203, 0x0000, 0x0206, - 0x0206, 0x0206, 0x0206, 0x0206, 0x0206, 0x0206, 0x0000, 0x020e, - 0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0218, 0x0000, 0x0000, 0x021b, 0x0000, 0x021d, 0x021d, - 0x0000, 0x0220, 0x0000, 0x0222, 0x0222, 0x0000, 0x0000, 0x0226, - 0x0225, 0x0225, 0x0000, 0x0000, 0x022b, 0x0000, 0x022d, 0x0000, - 0x022f, 0x0000, 0x023b, 0x0231, 0x023b, 0x023b, 0x023b, 0x023b, - 0x023b, 0x023b, 0x023b, 0x0231, 0x023b, 0x023b, 0x0000, 0x023e, - // Entry 240 - 27F - 0x023e, 0x023e, 0x0000, 0x0242, 0x0000, 0x0244, 0x0000, 0x0246, - 0x0246, 0x0000, 0x0249, 0x0000, 0x024b, 0x024b, 0x024b, 0x024b, - 0x024b, 0x024b, 0x0000, 0x0252, 0x0000, 0x0254, 0x0000, 0x0256, - 0x0000, 0x0258, 0x0000, 0x025a, 0x0000, 0x0000, 0x025d, 0x025d, - 0x025d, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000, - 0x0000, 0x0268, 0x0267, 0x0267, 0x0000, 0x026c, 0x0000, 0x026e, - 0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0000, 0x0275, 0x0000, - 0x0000, 0x0278, 0x0000, 0x027a, 0x027a, 0x027a, 0x027a, 0x0000, - // Entry 280 - 2BF - 0x027f, 0x027f, 0x027f, 0x0000, 0x0283, 0x0283, 0x0283, 0x0283, - 0x0283, 0x0000, 0x0289, 0x0289, 0x0289, 
0x0289, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0291, 0x0291, 0x0291, 0x0000, 0x0295, 0x0295, - 0x0295, 0x0295, 0x0000, 0x0000, 0x029b, 0x029b, 0x029b, 0x029b, - 0x0000, 0x02a0, 0x0000, 0x02a2, 0x02a2, 0x0000, 0x02a5, 0x0000, - 0x02a7, 0x02a7, 0x0000, 0x0000, 0x02ab, 0x0000, 0x0000, 0x02ae, - 0x0000, 0x02b0, 0x02b0, 0x0000, 0x0000, 0x02b4, 0x0000, 0x02b6, - 0x0000, 0x02b8, 0x0000, 0x02ba, 0x0000, 0x02bc, 0x02bc, 0x0000, - // Entry 2C0 - 2FF - 0x0000, 0x02c0, 0x0000, 0x02c2, 0x02bf, 0x02bf, 0x0000, 0x0000, - 0x02c7, 0x02c6, 0x02c6, 0x0000, 0x0000, 0x02cc, 0x0000, 0x02ce, - 0x0000, 0x02d0, 0x0000, 0x0000, 0x02d3, 0x0000, 0x0000, 0x0000, - 0x02d7, 0x0000, 0x02d9, 0x0000, 0x02db, 0x0000, 0x02dd, 0x02dd, - 0x0000, 0x02e0, 0x0000, 0x02e2, 0x0000, 0x02e4, 0x02e4, 0x02e4, - 0x02e4, 0x02e4, 0x0000, 0x02ea, 0x02eb, 0x02ea, 0x0000, 0x02ee, -} // Size: 1528 bytes - -// Total table size 1528 bytes (1KiB); checksum: B99CF952 diff --git a/vendor/golang.org/x/text/secure/doc.go b/vendor/golang.org/x/text/secure/doc.go deleted file mode 100644 index e531c354354cf..0000000000000 --- a/vendor/golang.org/x/text/secure/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// secure is a repository of text security related packages. -package secure // import "golang.org/x/text/secure" diff --git a/vendor/golang.org/x/text/unicode/doc.go b/vendor/golang.org/x/text/unicode/doc.go deleted file mode 100644 index e8f1032d051a5..0000000000000 --- a/vendor/golang.org/x/text/unicode/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// unicode holds packages with implementations of Unicode standards that are -// mostly used as building blocks for other packages in golang.org/x/text, -// layout engines, or are otherwise more low-level in nature. -package unicode diff --git a/vendor/google.golang.org/genproto/googleapis/api/authorization_config.pb.go b/vendor/google.golang.org/genproto/googleapis/api/authorization_config.pb.go deleted file mode 100644 index 8062b9619ccbf..0000000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/authorization_config.pb.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/experimental/authorization_config.proto - -/* -Package api is a generated protocol buffer package. - -It is generated from these files: - google/api/experimental/authorization_config.proto - google/api/experimental/experimental.proto - -It has these top-level messages: - AuthorizationConfig - Experimental -*/ -package api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Configuration of authorization. -// -// This section determines the authorization provider, if unspecified, then no -// authorization check will be done. 
-// -// Example: -// -// experimental: -// authorization: -// provider: firebaserules.googleapis.com -type AuthorizationConfig struct { - // The name of the authorization provider, such as - // firebaserules.googleapis.com. - Provider string `protobuf:"bytes,1,opt,name=provider" json:"provider,omitempty"` -} - -func (m *AuthorizationConfig) Reset() { *m = AuthorizationConfig{} } -func (m *AuthorizationConfig) String() string { return proto.CompactTextString(m) } -func (*AuthorizationConfig) ProtoMessage() {} -func (*AuthorizationConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *AuthorizationConfig) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func init() { - proto.RegisterType((*AuthorizationConfig)(nil), "google.api.AuthorizationConfig") -} - -func init() { proto.RegisterFile("google/api/experimental/authorization_config.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 180 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4a, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xad, 0x28, 0x48, 0x2d, 0xca, 0xcc, 0x4d, 0xcd, - 0x2b, 0x49, 0xcc, 0xd1, 0x4f, 0x2c, 0x2d, 0xc9, 0xc8, 0x2f, 0xca, 0xac, 0x4a, 0x2c, 0xc9, 0xcc, - 0xcf, 0x8b, 0x4f, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, - 0x82, 0xe8, 0xd1, 0x4b, 0x2c, 0xc8, 0x54, 0x32, 0xe4, 0x12, 0x76, 0x44, 0x56, 0xe9, 0x0c, 0x56, - 0x28, 0x24, 0xc5, 0xc5, 0x51, 0x50, 0x94, 0x5f, 0x96, 0x99, 0x92, 0x5a, 0x24, 0xc1, 0xa8, 0xc0, - 0xa8, 0xc1, 0x19, 0x04, 0xe7, 0x3b, 0x25, 0x71, 0xf1, 0x25, 0xe7, 0xe7, 0xea, 0x21, 0x0c, 0x71, - 0x92, 0xc0, 0x62, 0x44, 0x00, 0xc8, 0xaa, 0x00, 0xc6, 0x28, 0x5d, 0xa8, 0xba, 0xf4, 0xfc, 0x9c, - 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x43, 0xf4, 0x21, 0x52, - 0x89, 0x05, 0x99, 0xc5, 0x20, 0xf7, 0x5b, 0x27, 0x16, 0x64, 0x2e, 0x62, 0x62, 0x71, 0x77, 0x0c, - 0xf0, 0x4c, 0x62, 0x03, 0x2b, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x52, 0x27, 0x0c, 0xba, - 0xdf, 0x00, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/experimental.pb.go b/vendor/google.golang.org/genproto/googleapis/api/experimental.pb.go deleted file mode 100644 index a88df3d14039e..0000000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/experimental.pb.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/experimental/experimental.proto - -package api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "google.golang.org/genproto/googleapis/api/annotations" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Experimental service configuration. These configuration options can -// only be used by whitelisted users. -type Experimental struct { - // Authorization configuration. 
- Authorization *AuthorizationConfig `protobuf:"bytes,8,opt,name=authorization" json:"authorization,omitempty"` -} - -func (m *Experimental) Reset() { *m = Experimental{} } -func (m *Experimental) String() string { return proto.CompactTextString(m) } -func (*Experimental) ProtoMessage() {} -func (*Experimental) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -func (m *Experimental) GetAuthorization() *AuthorizationConfig { - if m != nil { - return m.Authorization - } - return nil -} - -func init() { - proto.RegisterType((*Experimental)(nil), "google.api.Experimental") -} - -func init() { proto.RegisterFile("google/api/experimental/experimental.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 204 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xad, 0x28, 0x48, 0x2d, 0xca, 0xcc, 0x4d, 0xcd, - 0x2b, 0x49, 0xcc, 0x41, 0xe1, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x71, 0x41, 0xd4, 0xea, - 0x25, 0x16, 0x64, 0x4a, 0xc9, 0x20, 0xe9, 0x4b, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, - 0xcf, 0x2b, 0x86, 0xa8, 0x94, 0x32, 0xc2, 0x65, 0x6a, 0x62, 0x69, 0x49, 0x46, 0x7e, 0x51, 0x66, - 0x15, 0x58, 0x75, 0x7c, 0x72, 0x7e, 0x5e, 0x5a, 0x66, 0x3a, 0x44, 0x8f, 0x52, 0x28, 0x17, 0x8f, - 0x2b, 0x92, 0x52, 0x21, 0x57, 0x2e, 0x5e, 0x14, 0xd5, 0x12, 0x1c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, - 0xf2, 0x7a, 0x08, 0x57, 0xe8, 0x39, 0x22, 0x2b, 0x70, 0x06, 0x9b, 0x16, 0x84, 0xaa, 0xcb, 0x29, - 0x9a, 0x8b, 0x2f, 0x39, 0x3f, 0x17, 0x49, 0x93, 0x93, 0x20, 0xb2, 0x35, 0x01, 0x20, 0xbb, 0x03, - 0x18, 0xa3, 0x74, 0xa1, 0x0a, 0xd2, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, - 0xd3, 0x53, 0xf3, 0xc0, 0x2e, 0xd3, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x83, 0x3c, 0x64, 0x9d, - 0x58, 0x90, 0xb9, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, 0xac, 0xc0, 0x18, 0x10, - 0x00, 0x00, 0xff, 0xff, 0xa0, 0x95, 0x20, 0xe5, 0x46, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/regen.go b/vendor/google.golang.org/genproto/regen.go deleted file mode 100644 index 9c906f2095be0..0000000000000 --- a/vendor/google.golang.org/genproto/regen.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build ignore - -// Regen.go regenerates the genproto repository. -// -// Regen.go recursively walks through each directory named by given arguments, -// looking for all .proto files. (Symlinks are not followed.) -// If the pkg_prefix flag is not an empty string, -// any proto file without `go_package` option -// or whose option does not begin with the prefix is ignored. -// Protoc is executed on remaining files, -// one invocation per set of files declaring the same Go package. 
-package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" -) - -var goPkgOptRe = regexp.MustCompile(`(?m)^option go_package = (.*);`) - -func usage() { - fmt.Fprintln(os.Stderr, `usage: go run regen.go -go_out=path/to/output [-pkg_prefix=pkg/prefix] roots... - -Most users will not need to run this file directly. -To regenerate this repository, run regen.sh instead.`) - flag.PrintDefaults() -} - -func main() { - goOutDir := flag.String("go_out", "", "go_out argument to pass to protoc-gen-go") - pkgPrefix := flag.String("pkg_prefix", "", "only include proto files with go_package starting with this prefix") - flag.Usage = usage - flag.Parse() - - if *goOutDir == "" { - log.Fatal("need go_out flag") - } - - pkgFiles := make(map[string][]string) - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() || !strings.HasSuffix(path, ".proto") { - return nil - } - pkg, err := goPkg(path) - if err != nil { - return err - } - pkgFiles[pkg] = append(pkgFiles[pkg], path) - return nil - } - for _, root := range flag.Args() { - if err := filepath.Walk(root, walkFn); err != nil { - log.Fatal(err) - } - } - for pkg, fnames := range pkgFiles { - if !strings.HasPrefix(pkg, *pkgPrefix) { - continue - } - if out, err := protoc(*goOutDir, flag.Args(), fnames); err != nil { - log.Fatalf("error executing protoc: %s\n%s", err, out) - } - } -} - -// goPkg reports the import path declared in the given file's -// `go_package` option. If the option is missing, goPkg returns empty string. -func goPkg(fname string) (string, error) { - content, err := ioutil.ReadFile(fname) - if err != nil { - return "", err - } - - var pkgName string - if match := goPkgOptRe.FindSubmatch(content); len(match) > 0 { - pn, err := strconv.Unquote(string(match[1])) - if err != nil { - return "", err - } - pkgName = pn - } - if p := strings.IndexRune(pkgName, ';'); p > 0 { - pkgName = pkgName[:p] - } - return pkgName, nil -} - -// protoc executes the "protoc" command on files named in fnames, -// passing go_out and include flags specified in goOut and includes respectively. -// protoc returns combined output from stdout and stderr. -func protoc(goOut string, includes, fnames []string) ([]byte, error) { - args := []string{"--go_out=plugins=grpc:" + goOut} - for _, inc := range includes { - args = append(args, "-I", inc) - } - args = append(args, fnames...) - return exec.Command("protoc", args...).CombinedOutput() -} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go deleted file mode 100644 index aba962840c802..0000000000000 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package grpc_lb_v1 is the parent package of all gRPC loadbalancer -// message and service protobuf definitions. -package grpc_lb_v1 diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go deleted file mode 100644 index c6212f406f7bb..0000000000000 --- a/vendor/google.golang.org/grpc/health/health.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto - -// Package health provides some utility functions to health-check a server. The implementation -// is based on protobuf. Users need to write their own implementations if other IDLs are used. -package health - -import ( - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - healthpb "google.golang.org/grpc/health/grpc_health_v1" -) - -// Server implements `service Health`. -type Server struct { - mu sync.Mutex - // statusMap stores the serving status of the services this Server monitors. - statusMap map[string]healthpb.HealthCheckResponse_ServingStatus -} - -// NewServer returns a new Server. -func NewServer() *Server { - return &Server{ - statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), - } -} - -// Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() - if in.Service == "" { - // check the server overall health status. - return &healthpb.HealthCheckResponse{ - Status: healthpb.HealthCheckResponse_SERVING, - }, nil - } - if status, ok := s.statusMap[in.Service]; ok { - return &healthpb.HealthCheckResponse{ - Status: status, - }, nil - } - return nil, grpc.Errorf(codes.NotFound, "unknown service") -} - -// SetServingStatus is called when need to reset the serving status of a service -// or insert a new service entry into the statusMap. -func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { - s.mu.Lock() - s.statusMap[service] = status - s.mu.Unlock() -}
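For reference, the health.go removed above is the stock gRPC health service (grpc.health.v1.Health). A minimal sketch of how such a server is typically registered, assuming the upstream google.golang.org/grpc/health and google.golang.org/grpc/health/grpc_health_v1 packages are used instead of the vendor copy deleted here; the listen address and service name below are illustrative only, not taken from this patch:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Illustrative address; not taken from this patch.
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}

	srv := grpc.NewServer()

	// Register the stock health service so clients and load balancers
	// can call grpc.health.v1.Health/Check against this server.
	hs := health.NewServer()
	healthpb.RegisterHealthServer(srv, hs)

	// Mark an (illustrative) service as serving. The empty service name
	// reports overall server health, as the removed Check method shows.
	hs.SetServingStatus("example.Service", healthpb.HealthCheckResponse_SERVING)

	if err := srv.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}

The sketch assumes the package is consumed from upstream google.golang.org/grpc rather than from the vendored copy removed by this change.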