From 1b56c9c80fe8ecdedc8491034e3f2755157b1ec1 Mon Sep 17 00:00:00 2001
From: Roland Schilter
Date: Wed, 7 Jun 2017 17:20:40 +0100
Subject: [PATCH] Update github.com/weaveworks/common & deps

Bumped all packages that make the build fail:

gvt update github.com/golang/protobuf/proto
gvt fetch github.com/golang/protobuf/ptypes
gvt fetch google.golang.org/genproto/googleapis/rpc/status
gvt update google.golang.org/grpc/status
gvt update google.golang.org/grpc/transport
gvt update golang.org/x/net/http2
---
 .../github.com/golang/protobuf/proto/LICENSE | 31 +
 .../github.com/golang/protobuf/proto/Makefile | 43 -
 .../golang/protobuf/proto/all_test.go | 155 +-
 .../golang/protobuf/proto/any_test.go | 300 +++
 .../github.com/golang/protobuf/proto/clone.go | 14 +-
 .../golang/protobuf/proto/clone_test.go | 33 +
 .../golang/protobuf/proto/decode.go | 133 +-
 .../golang/protobuf/proto/decode_test.go | 258 +++
 .../golang/protobuf/proto/encode.go | 112 +-
 .../golang/protobuf/proto/encode_test.go | 85 +
 .../github.com/golang/protobuf/proto/equal.go | 63 +-
 .../golang/protobuf/proto/equal_test.go | 15 +
 .../golang/protobuf/proto/extensions.go | 231 +-
 .../golang/protobuf/proto/extensions_test.go | 246 +-
 .../github.com/golang/protobuf/proto/lib.go | 18 +-
 .../golang/protobuf/proto/map_test.go | 46 +
 .../golang/protobuf/proto/message_set.go | 84 +-
 .../golang/protobuf/proto/message_set_test.go | 10 +-
 .../golang/protobuf/proto/pointer_reflect.go | 7 +-
 .../golang/protobuf/proto/pointer_unsafe.go | 6 +-
 .../golang/protobuf/proto/properties.go | 97 +-
 .../protobuf/proto/proto3_proto/proto3.pb.go | 257 ++-
 .../golang/protobuf/proto/proto3_test.go | 10 +
 .../golang/protobuf/proto/size_test.go | 25 +-
 .../golang/protobuf/proto/testdata/test.pb.go | 1797 ++++++++++++---
 .../golang/protobuf/proto/testdata/test.proto | 54 +
 .../github.com/golang/protobuf/proto/text.go | 223 +-
 .../golang/protobuf/proto/text_parser.go | 201 +-
 .../golang/protobuf/proto/text_parser_test.go | 147 +-
 .../github.com/golang/protobuf/ptypes/LICENSE | 31 +
 .../github.com/golang/protobuf/ptypes/any.go | 136 ++
 .../golang/protobuf/ptypes/any/any.pb.go | 168 ++
 .../proto3.proto => ptypes/doc.go} | 43 +-
 .../golang/protobuf/ptypes/duration.go | 102 +
 .../protobuf/ptypes/duration/duration.pb.go | 146 ++
 .../golang/protobuf/ptypes/empty/empty.pb.go | 68 +
 .../protobuf/ptypes/struct/struct.pb.go | 382 ++++
 .../golang/protobuf/ptypes/timestamp.go | 125 ++
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 162 ++
 .../protobuf/ptypes/wrappers/wrappers.pb.go | 262 +++
 .../weaveworks/common/backoff/backoff.go | 13 +-
 .../weaveworks/common/httpgrpc/httpgrpc.go | 193 +-
 .../weaveworks/common/httpgrpc/httpgrpc.pb.go | 5 +-
 .../common/httpgrpc/server/server.go | 183 ++
 .../weaveworks/common/logging/logging.go | 12 +
 .../common/middleware/grpc_instrumentation.go | 15 +-
 .../common/middleware/path_rewrite.go | 12 +-
 .../common/server/fake_server.pb.go | 210 ++
 .../weaveworks/common/server/server.go | 5 +-
 .../weaveworks/common/user/logging.go | 21 +
 vendor/golang.org/x/net/http2/ciphers.go | 641 ++++++
 .../x/net/http2/configure_transport.go | 2 +-
 vendor/golang.org/x/net/http2/databuffer.go | 146 ++
 vendor/golang.org/x/net/http2/fixed_buffer.go | 60 -
 vendor/golang.org/x/net/http2/frame.go | 55 +-
 vendor/golang.org/x/net/http2/go16.go | 27 -
 vendor/golang.org/x/net/http2/go18.go | 6 +-
 vendor/golang.org/x/net/http2/go19.go | 16 +
 .../golang.org/x/net/http2/h2demo/h2demo.go | 70 +-
 vendor/golang.org/x/net/http2/h2demo/tmpl.go | 1991 +++++++++++++++++
 vendor/golang.org/x/net/http2/hpack/encode.go | 22 +-
 vendor/golang.org/x/net/http2/hpack/hpack.go | 82 +-
 vendor/golang.org/x/net/http2/hpack/tables.go | 255 ++-
 vendor/golang.org/x/net/http2/not_go16.go | 25 -
 vendor/golang.org/x/net/http2/not_go19.go | 16 +
 vendor/golang.org/x/net/http2/pipe.go | 16 +-
 vendor/golang.org/x/net/http2/server.go | 415 ++--
 vendor/golang.org/x/net/http2/transport.go | 10 +-
 .../x/net/http2/writesched_priority.go | 2 +-
 .../genproto/googleapis/rpc/status/LICENSE | 202 ++
 .../googleapis/rpc/status/status.pb.go | 143 ++
 vendor/google.golang.org/grpc/balancer.go | 8 +
 .../grpc/benchmark/worker/benchmark_client.go | 32 +-
 .../grpc/benchmark/worker/benchmark_server.go | 37 +-
 .../grpc/benchmark/worker/main.go | 17 +-
 .../grpc/benchmark/worker/util.go | 15 +
 vendor/google.golang.org/grpc/call.go | 110 +-
 vendor/google.golang.org/grpc/clientconn.go | 236 +-
 vendor/google.golang.org/grpc/codec.go | 119 +
 vendor/google.golang.org/grpc/codes/codes.go | 2 +-
 .../grpc/credentials/credentials.go | 12 +-
 .../grpc/credentials/oauth/oauth.go | 26 +-
 .../{mock => }/mock_helloworld/hw_mock.go | 0
 .../examples/route_guide/client/client.go | 2 +-
 .../route_guide/mock_routeguide/rg_mock.go | 200 ++
 .../examples/route_guide/server/server.go | 2 +-
 vendor/google.golang.org/grpc/go16.go | 113 +
 vendor/google.golang.org/grpc/go17.go | 113 +
 .../grpc/{grpclb => }/grpclb.go | 381 +++-
 .../grpc/grpclb/grpc_lb_v1/grpclb.pb.go | 402 ++--
 .../grpc/grpclb/grpclb_server_generated.go | 54 +
 vendor/google.golang.org/grpc/interceptor.go | 8 +-
 .../grpc/interop/test_utils.go | 10 +-
 .../grpc/keepalive/keepalive.go | 80 +
 .../grpc/metadata/metadata.go | 94 +-
 vendor/google.golang.org/grpc/peer/peer.go | 3 +-
 vendor/google.golang.org/grpc/proxy.go | 145 ++
 vendor/google.golang.org/grpc/rpc_util.go | 308 +--
 vendor/google.golang.org/grpc/server.go | 467 ++--
 .../grpc/stats/grpc_testing/test.pb.go | 157 +-
 .../google.golang.org/grpc/stats/handlers.go | 7 +-
 vendor/google.golang.org/grpc/stats/stats.go | 22 +-
 .../google.golang.org/grpc/status/status.go | 145 ++
 vendor/google.golang.org/grpc/stream.go | 173 +-
 .../{transport/pre_go16.go => test/race.go} | 20 +-
 .../grpc/test/servertester.go | 295 +++
 .../grpc/transport/control.go | 70 +-
 .../google.golang.org/grpc/transport/go16.go | 13 +
 .../google.golang.org/grpc/transport/go17.go | 16 +-
 .../grpc/transport/handler_server.go | 64 +-
 .../grpc/transport/http2_client.go | 469 ++--
 .../grpc/transport/http2_server.go | 410 +++-
 .../grpc/transport/http_util.go | 176 +-
 .../grpc/transport/transport.go | 163 +-
 vendor/manifest | 28 +-
 115 files changed, 13837 insertions(+), 2584 deletions(-)
 create mode 100644 vendor/github.com/golang/protobuf/proto/LICENSE
 delete mode 100644 vendor/github.com/golang/protobuf/proto/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/proto/any_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/decode_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/encode_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/map_test.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/LICENSE
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
 rename vendor/github.com/golang/protobuf/{proto/proto3_proto/proto3.proto => ptypes/doc.go} (70%)
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go
 create mode 100644
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go create mode 100644 vendor/github.com/weaveworks/common/httpgrpc/server/server.go create mode 100644 vendor/github.com/weaveworks/common/server/fake_server.pb.go create mode 100644 vendor/github.com/weaveworks/common/user/logging.go create mode 100644 vendor/golang.org/x/net/http2/ciphers.go create mode 100644 vendor/golang.org/x/net/http2/databuffer.go delete mode 100644 vendor/golang.org/x/net/http2/fixed_buffer.go create mode 100644 vendor/golang.org/x/net/http2/go19.go create mode 100644 vendor/golang.org/x/net/http2/h2demo/tmpl.go create mode 100644 vendor/golang.org/x/net/http2/not_go19.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/codec.go rename vendor/google.golang.org/grpc/examples/helloworld/{mock => }/mock_helloworld/hw_mock.go (100%) create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go create mode 100644 vendor/google.golang.org/grpc/go16.go create mode 100644 vendor/google.golang.org/grpc/go17.go rename vendor/google.golang.org/grpc/{grpclb => }/grpclb.go (53%) create mode 100644 vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/proxy.go create mode 100644 vendor/google.golang.org/grpc/status/status.go rename vendor/google.golang.org/grpc/{transport/pre_go16.go => test/race.go} (80%) create mode 100644 vendor/google.golang.org/grpc/test/servertester.go diff --git a/vendor/github.com/golang/protobuf/proto/LICENSE b/vendor/github.com/golang/protobuf/proto/LICENSE new file mode 100644 index 0000000000..1b1b1921ef --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index f1f06564a1..0000000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go index f68f913df9..41451a4073 100644 --- a/vendor/github.com/golang/protobuf/proto/all_test.go +++ b/vendor/github.com/golang/protobuf/proto/all_test.go @@ -420,7 +420,7 @@ func TestMarshalerEncoding(t *testing.T) { name string m Message want []byte - wantErr error + errType reflect.Type }{ { name: "Marshaler that fails", @@ -428,9 +428,11 @@ func TestMarshalerEncoding(t *testing.T) { err: errors.New("some marshal err"), b: []byte{5, 6, 7}, }, - // Since there's an error, nothing should be written to buffer. - want: nil, - wantErr: errors.New("some marshal err"), + // Since the Marshal method returned bytes, they should be written to the + // buffer. (For efficiency, we assume that Marshal implementations are + // always correct w.r.t. 
RequiredNotSetError and output.) + want: []byte{5, 6, 7}, + errType: reflect.TypeOf(errors.New("some marshal err")), }, { name: "Marshaler that fails with RequiredNotSetError", @@ -446,30 +448,37 @@ func TestMarshalerEncoding(t *testing.T) { 10, 3, // for &msgWithFakeMarshaler 5, 6, 7, // for &fakeMarshaler }, - wantErr: &RequiredNotSetError{}, + errType: reflect.TypeOf(&RequiredNotSetError{}), }, { name: "Marshaler that succeeds", m: &fakeMarshaler{ b: []byte{0, 1, 2, 3, 4, 127, 255}, }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - wantErr: nil, + want: []byte{0, 1, 2, 3, 4, 127, 255}, }, } for _, test := range tests { b := NewBuffer(nil) err := b.Marshal(test.m) - if _, ok := err.(*RequiredNotSetError); ok { - // We're not in package proto, so we can only assert the type in this case. - err = &RequiredNotSetError{} - } - if !reflect.DeepEqual(test.wantErr, err) { - t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + if reflect.TypeOf(err) != test.errType { + t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) } if !reflect.DeepEqual(test.want, b.Bytes()) { t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) } + if size := Size(test.m); size != len(b.Bytes()) { + t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) + } + + m, mErr := Marshal(test.m) + if !bytes.Equal(b.Bytes(), m) { + t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) + } + if !reflect.DeepEqual(err, mErr) { + t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", + test.name, fmt.Sprint(mErr), fmt.Sprint(err)) + } } } @@ -1302,7 +1311,7 @@ func TestEnum(t *testing.T) { // We don't care what the value actually is, just as long as it doesn't crash. func TestPrintingNilEnumFields(t *testing.T) { pb := new(GoEnum) - fmt.Sprintf("%+v", pb) + _ = fmt.Sprintf("%+v", pb) } // Verify that absent required fields cause Marshal/Unmarshal to return errors. @@ -1311,7 +1320,7 @@ func TestRequiredFieldEnforcement(t *testing.T) { _, err := Marshal(pb) if err == nil { t.Error("marshal: expected error, got nil") - } else if strings.Index(err.Error(), "Label") < 0 { + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { t.Errorf("marshal: bad error type: %v", err) } @@ -1322,16 +1331,42 @@ func TestRequiredFieldEnforcement(t *testing.T) { err = Unmarshal(buf, pb) if err == nil { t.Error("unmarshal: expected error, got nil") - } else if strings.Index(err.Error(), "{Unknown}") < 0 { + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. 
+func TestRequiredFieldEnforcementGroups(t *testing.T) { + pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} + if _, err := Marshal(pb); err == nil { + t.Error("marshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { + t.Errorf("marshal: bad error type: %v", err) + } + + buf := []byte{11, 12} + if err := Unmarshal(buf, pb); err == nil { + t.Error("unmarshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { t.Errorf("unmarshal: bad error type: %v", err) } } func TestTypedNilMarshal(t *testing.T) { // A typed nil should return ErrNil and not crash. - _, err := Marshal((*GoEnum)(nil)) - if err != ErrNil { - t.Errorf("Marshal: got err %v, want ErrNil", err) + { + var m *GoEnum + if _, err := Marshal(m); err != ErrNil { + t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) + } + } + + { + m := &Communique{Union: &Communique_Msg{nil}} + if _, err := Marshal(m); err == nil || err == ErrNil { + t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) + } } } @@ -1947,14 +1982,88 @@ func TestMapFieldRoundTrips(t *testing.T) { } func TestMapFieldWithNil(t *testing.T) { - m := &MessageWithMap{ + m1 := &MessageWithMap{ MsgMapping: map[int64]*FloatingPoint{ 1: nil, }, } - b, err := Marshal(m) - if err == nil { - t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) + b, err := Marshal(m1) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.MsgMapping[1]; !ok { + t.Error("msg_mapping[1] not present") + } else if v != nil { + t.Errorf("msg_mapping[1] not nil: %v", v) + } +} + +func TestMapFieldWithNilBytes(t *testing.T) { + m1 := &MessageWithMap{ + ByteMapping: map[bool][]byte{ + false: []byte{}, + true: nil, + }, + } + n := Size(m1) + b, err := Marshal(m1) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if n != len(b) { + t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) + } + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.ByteMapping[false]; !ok { + t.Error("byte_mapping[false] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[false] not empty: %#v", v) + } + if v, ok := m2.ByteMapping[true]; !ok { + t.Error("byte_mapping[true] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[true] not empty: %#v", v) + } +} + +func TestDecodeMapFieldMissingKey(t *testing.T) { + b := []byte{ + 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes + // no key + 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing key: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no key was not as expected. 
got: %v, want %v", got, want) + } +} + +func TestDecodeMapFieldMissingValue(t *testing.T) { + b := []byte{ + 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes + 0x08, 0x01, // varint key, value 1 + // no value + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing value: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) } } diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go new file mode 100644 index 0000000000..1a3c22ed41 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/any_test.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/proto/proto3_proto" + testpb "github.com/golang/protobuf/proto/testdata" + anypb "github.com/golang/protobuf/ptypes/any" +) + +var ( + expandedMarshaler = proto.TextMarshaler{ExpandAny: true} + expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} +) + +// anyEqual reports whether two messages which may be google.protobuf.Any or may +// contain google.protobuf.Any fields are equal. We can't use proto.Equal for +// comparison, because semantically equivalent messages may be marshaled to +// binary in different tag order. Instead, trust that TextMarshaler with +// ExpandAny option works and compare the text marshaling results. +func anyEqual(got, want proto.Message) bool { + // if messages are proto.Equal, no need to marshal. 
+ if proto.Equal(got, want) { + return true + } + g := expandedMarshaler.Text(got) + w := expandedMarshaler.Text(want) + return g == w +} + +type golden struct { + m proto.Message + t, c string +} + +var goldenMessages = makeGolden() + +func makeGolden() []golden { + nested := &pb.Nested{Bunny: "Monty"} + nb, err := proto.Marshal(nested) + if err != nil { + panic(err) + } + m1 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m2 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m3 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, + } + m4 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, + } + m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} + + any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} + proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) + proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) + any1b, err := proto.Marshal(any1) + if err != nil { + panic(err) + } + any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} + proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) + any2b, err := proto.Marshal(any2) + if err != nil { + panic(err) + } + m6 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + ManyThings: []*anypb.Any{ + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + }, + } + + const ( + m1Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m2Golden = ` +name: "David" +result_count: 47 +anything: < + ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m3Golden = ` +name: "David" +result_count: 47 +anything: < + ["type.googleapis.com/\"/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m4Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m5Golden = ` +[type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" +> +` + m6Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 42 + bikeshed: GREEN + rep_bytes: "roboto" + [testdata.Ext.more]: < + data: "baz" + > + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +` + ) + return []golden{ + {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, + {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, + {m3, strings.TrimSpace(m3Golden) + "\n", 
strings.TrimSpace(compact(m3Golden)) + " "}, + {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, + {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, + {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, + } +} + +func TestMarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { + t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) + } + if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { + t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) + } + } +} + +func TestUnmarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + want := tt.m + got := proto.Clone(tt.m) + got.Reset() + if err := proto.UnmarshalText(tt.t, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) + } + got.Reset() + if err := proto.UnmarshalText(tt.c, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) + } + } +} + +func TestMarshalUnknownAny(t *testing.T) { + m := &pb.Message{ + Anything: &anypb.Any{ + TypeUrl: "foo", + Value: []byte("bar"), + }, + } + want := `anything: < + type_url: "foo" + value: "bar" +> +` + got := expandedMarshaler.Text(m) + if got != want { + t.Errorf("got\n`%s`\nwant\n`%s`", got, want) + } +} + +func TestAmbiguousAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + type_url: "ttt/proto3_proto.Nested" + value: "\n\x05Monty" + `, pb) + t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) + if err != nil { + t.Errorf("failed to parse ambiguous Any message: %v", err) + } +} + +func TestUnmarshalOverwriteAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 7: Any message unpacked multiple times, or "type_url" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} + +func TestUnmarshalAnyMixAndMatch(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + value: "\n\x05Monty" + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 5: Any message unpacked multiple times, or "value" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go index 84bbaf4335..e392575b35 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -30,7 +30,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Protocol buffer deep copy and merge. -// TODO: MessageSet and RawMessage. +// TODO: RawMessage. 
package proto @@ -84,9 +84,15 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := in.Addr().Interface().(extendableProto); ok { - emOut := out.Addr().Interface().(extendableProto) - mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } } uf := in.FieldByName("XXX_unrecognized") diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go index 76720f18b7..f607ff49eb 100644 --- a/vendor/github.com/golang/protobuf/proto/clone_test.go +++ b/vendor/github.com/golang/protobuf/proto/clone_test.go @@ -195,6 +195,9 @@ var mergeTests = []struct { NameMapping: map[int32]string{6: "Nigel"}, MsgMapping: map[int64]*pb.FloatingPoint{ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, }, ByteMapping: map[bool][]byte{true: []byte("wowsa")}, }, @@ -203,6 +206,12 @@ var mergeTests = []struct { 6: "Bruce", // should be overwritten 7: "Andrew", }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(3.0), + Exact: proto.Bool(true), + }, // the entire message should be overwritten + }, }, want: &pb.MessageWithMap{ NameMapping: map[int32]string{ @@ -211,6 +220,9 @@ var mergeTests = []struct { }, MsgMapping: map[int64]*pb.FloatingPoint{ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, }, ByteMapping: map[bool][]byte{true: []byte("wowsa")}, }, @@ -254,6 +266,27 @@ var mergeTests = []struct { Union: &pb.Communique_Name{"Bobby Tables"}, }, }, + { + src: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, // replace + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert + }, + }, + dst: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep + }, + }, + want: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, + }, + }, + }, } func TestMerge(t *testing.T) { diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index 5810782fd8..aa207298f9 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 @@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) { return 0, 0 } -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { i := p.index l := len(p.buf) @@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { return } +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. @@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { @@ -378,6 +474,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group wire := int(u & 0x7) if wire == WireEndGroup { if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } return nil // input is satisfied } return fmt.Errorf("proto: %s: wiretype end group for non-group", st) @@ -390,11 +491,12 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group if !ok { // Maybe it's an extension? if prop.extendable { - if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { if err = o.skip(st, tag, wire); err == nil { - ext := e.ExtensionMap()[int32(tag)] // may be missing + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing ext.enc = append(ext.enc, o.buf[oi:o.index]...) 
- e.ExtensionMap()[int32(tag)] = ext + extmap[int32(tag)] = ext } continue } @@ -768,10 +870,11 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { } } keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() || !valelem.IsValid() { - // We did not decode the key or the value in the map entry. - // Either way, it's an invalid map entry. - return fmt.Errorf("proto: bad map data: missing key/val") + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) } v.SetMapIndex(keyelem, valelem) diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go new file mode 100644 index 0000000000..2c4c31d127 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode_test.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build go1.7 + +package proto_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + tpb "github.com/golang/protobuf/proto/proto3_proto" +) + +var ( + bytesBlackhole []byte + msgBlackhole = new(tpb.Message) +) + +// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and +// 2 bytes long). +func BenchmarkVarint32ArraySmall(b *testing.B) { + for i := uint(1); i <= 10; i++ { + dist := genInt32Dist([7]int{0, 3, 1}, 1<2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") ) // The fundamental encoders that put bytes on the wire. @@ -74,6 +82,10 @@ var ( const maxVarintBytes = 10 // maximum length of a varint +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + // EncodeVarint returns the varint encoding of x. 
// This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -105,6 +117,11 @@ func (p *Buffer) EncodeVarint(x uint64) error { return nil } +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + func sizeVarint(x uint64) (n int) { for { n++ @@ -217,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) { } p := NewBuffer(nil) err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } if p.buf == nil && err == nil { // Return a non-nil slice on success. return []byte{}, nil @@ -249,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { data, err := m.Marshal() - if err != nil { - return err - } p.buf = append(p.buf, data...) - return nil + return err } t, base, err := getbase(pb) @@ -265,9 +275,12 @@ func (p *Buffer) Marshal(pb Message) error { } if collectStats { - stats.Encode++ + (stats).Encode++ // Parens are to work around a goimports bug. } + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } return err } @@ -289,7 +302,7 @@ func Size(pb Message) (n int) { } if collectStats { - stats.Size++ + (stats).Size++ // Parens are to work around a goimports bug. } return @@ -994,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) { if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() - n += len(p.tagcode) n += sizeRawBytes(data) continue } @@ -1053,10 +1065,32 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) { // Encode an extension map. func (o *Buffer) enc_map(p *Properties, base structPointer) error { - v := *structPointer_ExtMap(base, p.field) - if err := encodeExtensionMap(v); err != nil { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { return err } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { // Fast-path for common cases: zero or one extensions. if len(v) <= 1 { for _, e := range v { @@ -1079,8 +1113,13 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { } func size_map(p *Properties, base structPointer) int { - v := *structPointer_ExtMap(base, p.field) - return sizeExtensionMap(v) + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) } // Encode a map field. @@ -1109,22 +1148,16 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { return err } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { return err } return nil } - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. 
+ for _, key := range v.MapKeys() { val := v.MapIndex(key) - // The only illegal map entry values are nil message pointers. - if val.Kind() == reflect.Ptr && val.IsNil() { - return errors.New("proto: map has nil element") - } - keycopy.Set(key) valcopy.Set(val) @@ -1212,13 +1245,18 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { return err } } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } } } // Do oneof fields. if prop.oneofMarshaler != nil { m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err != nil { + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { return err } } @@ -1226,6 +1264,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } if len(v) > 0 { o.buf = append(o.buf, v...) } @@ -1249,24 +1290,9 @@ func size_struct(prop *StructProperties, base structPointer) (n int) { } // Factor in any oneof fields. - // TODO: This could be faster and use less reflection. - if prop.oneofMarshaler != nil { - sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem() - for i := 0; i < prop.stype.NumField(); i++ { - fv := sv.Field(i) - if fv.Kind() != reflect.Interface || fv.IsNil() { - continue - } - if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" { - continue - } - spv := fv.Elem() // interface -> *T - sv := spv.Elem() // *T -> T - sf := sv.Type().Field(0) // StructField inside T - var prop Properties - prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf) - n += prop.size(&prop, toStructPointer(spv)) - } + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) } return diff --git a/vendor/github.com/golang/protobuf/proto/encode_test.go b/vendor/github.com/golang/protobuf/proto/encode_test.go new file mode 100644 index 0000000000..a7209475f1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode_test.go @@ -0,0 +1,85 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build go1.7 + +package proto_test + +import ( + "strconv" + "testing" + + "github.com/golang/protobuf/proto" + tpb "github.com/golang/protobuf/proto/proto3_proto" + "github.com/golang/protobuf/ptypes" +) + +var ( + blackhole []byte +) + +// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the +// same. +func BenchmarkAny(b *testing.B) { + data := make([]byte, 1<<20) + quantum := 1 << 10 + for i := uint(0); i <= 10; i++ { + b.Run(strconv.Itoa(quantum<= len(o.buf) { + if o.index >= len(o.buf) { break } } @@ -333,10 +473,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := pb.(extendableProto) + epb, ok := extendable(pb) if !ok { - err = errors.New("proto: not an extendable proto") - return + return nil, errors.New("proto: not an extendable proto") } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -351,9 +490,44 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e return } +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + // SetExtension sets the specified extension of pb to the specified value. 
-func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { - if err := checkExtensionTypes(pb, extension); err != nil { +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { return err } typ := reflect.TypeOf(extension.ExtensionType) @@ -369,10 +543,23 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) } - pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} return nil } +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + // A global registry of extensions. // The generated code will register the generated descriptors by calling RegisterExtension. diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go index 72552767d8..b6d9114c56 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions_test.go +++ b/vendor/github.com/golang/protobuf/proto/extensions_test.go @@ -32,19 +32,22 @@ package proto_test import ( + "bytes" "fmt" "reflect" + "sort" "testing" "github.com/golang/protobuf/proto" pb "github.com/golang/protobuf/proto/testdata" + "golang.org/x/sync/errgroup" ) func TestGetExtensionsWithMissingExtensions(t *testing.T) { msg := &pb.MyMessage{} ext1 := &pb.Ext{} if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", ext1) + t.Fatalf("Could not set ext1: %s", err) } exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ pb.E_Ext_More, @@ -61,6 +64,58 @@ func TestGetExtensionsWithMissingExtensions(t *testing.T) { } } +func TestExtensionDescsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{Count: proto.Int32(0)} + extdesc1 := pb.E_Ext_More + if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { + t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err) + } + + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", err) + } + extdesc2 := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 123456789, + Name: "a.b", + Tag: "varint,123456789,opt", + } + ext2 := proto.Bool(false) + if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { + t.Fatalf("Could not set ext2: %s", err) + } + + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Could not marshal msg: %v", err) + } + if err := proto.Unmarshal(b, msg); err != nil { + t.Fatalf("Could not unmarshal into msg: %v", err) + } + + descs, err := proto.ExtensionDescs(msg) + if err != nil { + t.Fatalf("proto.ExtensionDescs: got error %v", err) + } + sortExtDescs(descs) + wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}} + if !reflect.DeepEqual(descs, wantDescs) { + t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) + } +} + +type ExtensionDescSlice []*proto.ExtensionDesc + +func (s ExtensionDescSlice) Len() int { return 
len(s) } +func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field } +func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func sortExtDescs(s []*proto.ExtensionDesc) { + sort.Sort(ExtensionDescSlice(s)) +} + func TestGetExtensionStability(t *testing.T) { check := func(m *pb.MyMessage) bool { ext1, err := proto.GetExtension(m, pb.E_Ext_More) @@ -290,3 +345,192 @@ func TestNilExtension(t *testing.T) { // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. } + +func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { + // Add a repeated extension to the result. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + // Marshal message with a repeated extension. + msg1 := new(pb.OtherMessage) + err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) + if err != nil { + t.Fatalf("[%s] Error setting extension: %v", test.name, err) + } + b, err := proto.Marshal(msg1) + if err != nil { + t.Fatalf("[%s] Error marshaling message: %v", test.name, err) + } + + // Unmarshal and read the merged proto. + msg2 := new(pb.OtherMessage) + err = proto.Unmarshal(b, msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_RComplex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.([]*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(ext, test.ext) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) + } + } +} + +func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { + // We may see multiple instances of the same extension in the wire + // format. For example, the proto compiler may encode custom options in + // this way. Here, we verify that we merge the extensions together. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + var buf bytes.Buffer + var want pb.ComplexExtension + + // Generate a serialized representation of a repeated extension + // by catenating bytes together. + for i, e := range test.ext { + // Merge to create the wanted proto. 
+ proto.Merge(&want, e) + + // serialize the message + msg := new(pb.OtherMessage) + err := proto.SetExtension(msg, pb.E_Complex, e) + if err != nil { + t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) + } + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) + } + buf.Write(b) + } + + // Unmarshal and read the merged proto. + msg2 := new(pb.OtherMessage) + err := proto.Unmarshal(buf.Bytes(), msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_Complex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.(*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(*ext, want) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want) + } + } +} + +func TestClearAllExtensions(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + m := &pb.MyMessage{} + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + if !proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) + } + proto.ClearAllExtensions(m) + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } +} + +func TestMarshalRace(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + + m := &pb.MyMessage{Count: proto.Int32(4)} + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + + var g errgroup.Group + for n := 3; n > 0; n-- { + g.Go(func() error { + _, err := proto.Marshal(m) + return err + }) + } + if err := g.Wait(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index abbf7583ed..1c225504a0 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -70,6 +70,11 @@ for a protocol buffer variable v: with distinguished wrapper types for each possible field value. - Marshal and Unmarshal are functions to encode and decode the wire format. +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + The simplest way to describe this is to see an example. 
Given file test.proto, containing @@ -216,7 +221,7 @@ The resulting file, test.pb.go, is: To create and play with a Test object: -package main + package main import ( "log" @@ -229,6 +234,7 @@ package main test := &pb.Test{ Label: proto.String("hello"), Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, Optionalgroup: &pb.Test_OptionalGroup{ RequiredField: proto.String("good bye"), }, @@ -301,7 +307,7 @@ func GetStats() Stats { return stats } // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream - index int // write point + index int // read point // pools of basic types to amortize allocation. bools []bool @@ -881,3 +887,11 @@ func isProto3Zero(v reflect.Value) bool { } return false } + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go new file mode 100644 index 0000000000..313e879245 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/map_test.go @@ -0,0 +1,46 @@ +package proto_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + ppb "github.com/golang/protobuf/proto/proto3_proto" +) + +func marshalled() []byte { + m := &ppb.IntMaps{} + for i := 0; i < 1000; i++ { + m.Maps = append(m.Maps, &ppb.IntMap{ + Rtt: map[int32]int32{1: 2}, + }) + } + b, err := proto.Marshal(m) + if err != nil { + panic(fmt.Sprintf("Can't marshal %+v: %v", m, err)) + } + return b +} + +func BenchmarkConcurrentMapUnmarshal(b *testing.B) { + in := marshalled() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } + }) +} + +func BenchmarkSequentialMapUnmarshal(b *testing.B) { + in := marshalled() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index 9d912bce19..fd982decd6 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -44,11 +44,11 @@ import ( "sort" ) -// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. // A message type ID is required for storing a protocol buffer in a message set. -var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") +var errNoMessageTypeID = errors.New("proto does not have a message type ID") -// The first two types (_MessageSet_Item and MessageSet) +// The first two types (_MessageSet_Item and messageSet) // model what the protocol compiler produces for the following protocol message: // message MessageSet { // repeated group Item = 1 { @@ -58,27 +58,20 @@ var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") // } // That is the MessageSet wire format. 
We can't use a proto to generate these // because that would introduce a circular dependency between it and this package. -// -// When a proto1 proto has a field that looks like: -// optional message info = 3; -// the protocol compiler produces a field in the generated struct that looks like: -// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` -// The package is automatically inserted so there is no need for that proto file to -// import this package. type _MessageSet_Item struct { TypeId *int32 `protobuf:"varint,2,req,name=type_id"` Message []byte `protobuf:"bytes,3,req,name=message"` } -type MessageSet struct { +type messageSet struct { Item []*_MessageSet_Item `protobuf:"group,1,rep"` XXX_unrecognized []byte // TODO: caching? } -// Make sure MessageSet is a Message. -var _ Message = (*MessageSet)(nil) +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) // messageTypeIder is an interface satisfied by a protocol buffer type // that may be stored in a MessageSet. @@ -86,7 +79,7 @@ type messageTypeIder interface { MessageTypeId() int32 } -func (ms *MessageSet) find(pb Message) *_MessageSet_Item { +func (ms *messageSet) find(pb Message) *_MessageSet_Item { mti, ok := pb.(messageTypeIder) if !ok { return nil @@ -100,24 +93,24 @@ func (ms *MessageSet) find(pb Message) *_MessageSet_Item { return nil } -func (ms *MessageSet) Has(pb Message) bool { +func (ms *messageSet) Has(pb Message) bool { if ms.find(pb) != nil { return true } return false } -func (ms *MessageSet) Unmarshal(pb Message) error { +func (ms *messageSet) Unmarshal(pb Message) error { if item := ms.find(pb); item != nil { return Unmarshal(item.Message, pb) } if _, ok := pb.(messageTypeIder); !ok { - return ErrNoMessageTypeId + return errNoMessageTypeID } return nil // TODO: return error instead? } -func (ms *MessageSet) Marshal(pb Message) error { +func (ms *messageSet) Marshal(pb Message) error { msg, err := Marshal(pb) if err != nil { return err @@ -130,7 +123,7 @@ func (ms *MessageSet) Marshal(pb Message) error { mti, ok := pb.(messageTypeIder) if !ok { - return ErrNoMessageTypeId + return errNoMessageTypeID } mtid := mti.MessageTypeId() @@ -141,9 +134,9 @@ func (ms *MessageSet) Marshal(pb Message) error { return nil } -func (ms *MessageSet) Reset() { *ms = MessageSet{} } -func (ms *MessageSet) String() string { return CompactTextString(ms) } -func (*MessageSet) ProtoMessage() {} +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} // Support for the message_set_wire_format message option. @@ -156,9 +149,21 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { - if err := encodeExtensionMap(m); err != nil { - return nil, err +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") } // Sort extension IDs to provide a deterministic encoding. 
@@ -169,7 +174,7 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { } sort.Ints(ids) - ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} for _, id := range ids { e := m[int32(id)] // Remove the wire type and field number varint, as well as the length varint. @@ -185,8 +190,18 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { - ms := new(MessageSet) +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) if err := Unmarshal(buf, ms); err != nil { return err } @@ -216,7 +231,16 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { // MarshalMessageSetJSON encodes the extension map represented by m in JSON format. // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } var b bytes.Buffer b.WriteByte('{') @@ -259,7 +283,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { // Common-case fast path. if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { return nil diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go index 7c29bccf4b..353a3ea769 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set_test.go +++ b/vendor/github.com/golang/protobuf/proto/message_set_test.go @@ -38,7 +38,7 @@ import ( func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { // Check that a repeated message set entry will be concatenated. 
- in := &MessageSet{ + in := &messageSet{ Item: []*_MessageSet_Item{ {TypeId: Int32(12345), Message: []byte("hoo")}, {TypeId: Int32(12345), Message: []byte("hah")}, @@ -50,13 +50,13 @@ func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { } t.Logf("Marshaled bytes: %q", b) - m := make(map[int32]Extension) - if err := UnmarshalMessageSet(b, m); err != nil { + var extensions XXX_InternalExtensions + if err := UnmarshalMessageSet(b, &extensions); err != nil { t.Fatalf("UnmarshalMessageSet: %v", err) } - ext, ok := m[12345] + ext, ok := extensions.p.extensionMap[12345] if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", m) + t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) } // Skip wire type/field number and length varints. got := skipVarint(skipVarint(ext.enc)) diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index 749919d250..fb512e2e16 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine +// +build appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -139,6 +139,11 @@ func structPointer_StringSlice(p structPointer, f field) *[]string { return structPointer_ifield(p, f).(*[]string) } +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + // ExtMap returns the address of an extension map field in the struct. func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return structPointer_ifield(p, f).(*map[int32]Extension) diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index e9be0fe92e..6b5567d47c 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine +// +build !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -126,6 +126,10 @@ func structPointer_StringSlice(p structPointer, f field) *[]string { } // ExtMap returns the address of an extension map field in the struct. 
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index ec3125812a..ec2289c005 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -37,6 +37,7 @@ package proto import ( "fmt" + "log" "os" "reflect" "sort" @@ -90,6 +91,9 @@ type oneofMarshaler func(Message, *Buffer) error // A oneofUnmarshaler does the unmarshaling for a oneof field in a message. type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -141,6 +145,7 @@ type StructProperties struct { oneofMarshaler oneofMarshaler oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer stype reflect.Type // OneofTypes contains information about the oneof fields in this message. @@ -168,6 +173,7 @@ func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order type Properties struct { Name string // name of the field, for error messages OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc Wire string WireType int Tag int @@ -224,8 +230,9 @@ func (p *Properties) String() string { if p.Packed { s += ",packed" } - if p.OrigName != p.Name { - s += ",name=" + p.OrigName + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName } if p.proto3 { s += ",proto3" @@ -305,6 +312,8 @@ func (p *Properties) Parse(s string) { p.Packed = true case strings.HasPrefix(f, "name="): p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] case strings.HasPrefix(f, "enum="): p.Enum = f[5:] case f == "proto3": @@ -464,17 +473,13 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.dec = (*Buffer).dec_slice_int64 p.packedDec = (*Buffer).dec_slice_packed_int64 case reflect.Uint8: - p.enc = (*Buffer).enc_slice_byte p.dec = (*Buffer).dec_slice_byte - p.size = size_slice_byte - // This is a []byte, which is either a bytes field, - // or the value of a map field. In the latter case, - // we always encode an empty []byte, so we should not - // use the proto3 enc/size funcs. - // f == nil iff this is the key/value of a map field. 
- if p.proto3 && f != nil { + if p.proto3 { p.enc = (*Buffer).enc_proto3_slice_byte p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte } case reflect.Float32, reflect.Float64: switch t2.Bits() { @@ -673,7 +678,8 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -684,15 +690,22 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_extensions" { // special case + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case p.enc = (*Buffer).enc_map p.dec = nil // not needed p.size = size_map - } - if f.Name == "XXX_unrecognized" { // special case + } else if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } - oneof := f.Tag.Get("protobuf_oneof") != "" // special case + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } prop.Prop[i] = p prop.order[i] = i if debug { @@ -702,7 +715,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } @@ -711,11 +724,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { sort.Sort(prop) type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{}) + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs() + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() prop.stype = t // Interpret oneof metadata. @@ -809,3 +822,51 @@ func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[ func EnumValueMap(enumType string) map[string]int32 { return enumValueMaps[enumType] } + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. 
+func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go index 37c7782092..cc4d0489f1 100644 --- a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -12,14 +12,27 @@ It has these top-level messages: Message Nested MessageWithMap + IntMap + IntMaps */ package proto3_proto import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" import testdata "github.com/golang/protobuf/proto/testdata" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Message_Humour int32 @@ -46,25 +59,96 @@ var Message_Humour_value = map[string]int32{ func (x Message_Humour) String() string { return proto.EnumName(Message_Humour_name, int32(x)) } +func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } type Message struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` - HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` - TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` - Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"` + ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"` Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` - Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` + ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` + Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"` + Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Message) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Message) GetHilarity() Message_Humour { + if m != nil { + return m.Hilarity + } + return Message_UNKNOWN +} + +func (m *Message) GetHeightInCm() uint32 { + if m != nil { + 
return m.HeightInCm + } + return 0 +} + +func (m *Message) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Message) GetResultCount() int64 { + if m != nil { + return m.ResultCount + } + return 0 } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} +func (m *Message) GetTrueScotsman() bool { + if m != nil { + return m.TrueScotsman + } + return false +} + +func (m *Message) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *Message) GetKey() []uint64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Message) GetShortKey() []int32 { + if m != nil { + return m.ShortKey + } + return nil +} func (m *Message) GetNested() *Nested { if m != nil { @@ -73,6 +157,13 @@ func (m *Message) GetNested() *Nested { return nil } +func (m *Message) GetRFunny() []Message_Humour { + if m != nil { + return m.RFunny + } + return nil +} + func (m *Message) GetTerrain() map[string]*Nested { if m != nil { return m.Terrain @@ -94,21 +185,66 @@ func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { return nil } +func (m *Message) GetAnything() *google_protobuf.Any { + if m != nil { + return m.Anything + } + return nil +} + +func (m *Message) GetManyThings() []*google_protobuf.Any { + if m != nil { + return m.ManyThings + } + return nil +} + +func (m *Message) GetSubmessage() *Message { + if m != nil { + return m.Submessage + } + return nil +} + +func (m *Message) GetChildren() []*Message { + if m != nil { + return m.Children + } + return nil +} + type Nested struct { Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` + Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Nested) GetBunny() string { + if m != nil { + return m.Bunny + } + return "" } -func (m *Nested) Reset() { *m = Nested{} } -func (m *Nested) String() string { return proto.CompactTextString(m) } -func (*Nested) ProtoMessage() {} +func (m *Nested) GetCute() bool { + if m != nil { + return m.Cute + } + return false +} type MessageWithMap struct { - ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *MessageWithMap) GetByteMapping() map[bool][]byte { if m != nil { @@ -117,6 +253,95 @@ func (m *MessageWithMap) GetByteMapping() map[bool][]byte { return nil } +type IntMap struct { + Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" 
protobuf_val:"varint,2,opt,name=value"` +} + +func (m *IntMap) Reset() { *m = IntMap{} } +func (m *IntMap) String() string { return proto.CompactTextString(m) } +func (*IntMap) ProtoMessage() {} +func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *IntMap) GetRtt() map[int32]int32 { + if m != nil { + return m.Rtt + } + return nil +} + +type IntMaps struct { + Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"` +} + +func (m *IntMaps) Reset() { *m = IntMaps{} } +func (m *IntMaps) String() string { return proto.CompactTextString(m) } +func (*IntMaps) ProtoMessage() {} +func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *IntMaps) GetMaps() []*IntMap { + if m != nil { + return m.Maps + } + return nil +} + func init() { + proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") + proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") + proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap") + proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps") proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) } + +func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 733 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34, + 0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a, + 0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98, + 0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f, + 0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3, + 0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8, + 0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef, + 0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7, + 0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35, + 0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc, + 0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5, + 0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c, + 0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58, + 0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61, + 0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8, + 0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71, + 0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a, + 0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88, + 0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35, + 0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63, + 0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a, + 0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 
0xc3, 0x78, + 0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2, + 0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62, + 0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f, + 0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11, + 0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97, + 0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76, + 0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8, + 0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19, + 0x46, 0x52, 0xea, 0x8d, 0xa2, 0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3, + 0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0, + 0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d, + 0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1, + 0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9, + 0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c, + 0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca, + 0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51, + 0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0, + 0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7, + 0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05, + 0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09, + 0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47, + 0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91, + 0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde, + 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go index 462f8055c3..735837f2de 100644 --- a/vendor/github.com/golang/protobuf/proto/proto3_test.go +++ b/vendor/github.com/golang/protobuf/proto/proto3_test.go @@ -93,6 +93,16 @@ func TestRoundTripProto3(t *testing.T) { } } +func TestGettersForBasicTypesExist(t *testing.T) { + var m pb.Message + if got := m.GetNested().GetBunny(); got != "" { + t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got) + } + if got := m.GetNested().GetCute(); got { + t.Errorf("m.GetNested().GetCute() = %t, want false", got) + } +} + func TestProto3SetDefaults(t *testing.T) { in := &pb.Message{ Terrain: map[string]*pb.Nested{ diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go index 40c393ff27..af1034dc7b 100644 --- a/vendor/github.com/golang/protobuf/proto/size_test.go +++ b/vendor/github.com/golang/protobuf/proto/size_test.go @@ -125,10 +125,27 @@ var SizeTests = []struct { {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, 
{"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, - {"oneof not set", &pb.Communique{}}, - {"oneof zero int32", &pb.Communique{Union: &pb.Communique_Number{0}}}, - {"oneof int32", &pb.Communique{Union: &pb.Communique_Number{3}}}, - {"oneof string", &pb.Communique{Union: &pb.Communique_Name{"Rhythmic Fman"}}}, + {"oneof not set", &pb.Oneof{}}, + {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}}, + {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}}, + {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}}, + {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}}, + {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}}, + {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}}, + {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}}, + {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}}, + {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}}, + {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}}, + {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}}, + {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}}, + {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}}, + {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}}, + {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}}, + {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, + {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}}, + {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, + {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, + {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, } func TestSize(t *testing.T) { diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go index 7d308086ee..e980d1a03c 100644 --- a/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go +++ b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: test.proto -// DO NOT EDIT! /* Package testdata is a generated protocol buffer package. @@ -12,6 +11,7 @@ It has these top-level messages: GoEnum GoTestField GoTest + GoTestRequiredGroupField GoSkipTest NonPackedTest PackedTest @@ -20,8 +20,10 @@ It has these top-level messages: NewMessage InnerMessage OtherMessage + RequiredInnerMessage MyMessage Ext + ComplexExtension DefaultsMessage MyMessageSet Empty @@ -35,6 +37,7 @@ It has these top-level messages: GroupNew FloatingPoint MessageWithMap + Oneof Communique */ package testdata @@ -48,6 +51,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type FOO int32 const ( @@ -77,6 +86,7 @@ func (x *FOO) UnmarshalJSON(data []byte) error { *x = FOO(value) return nil } +func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // An enum, for completeness. 
type GoTest_KIND int32 @@ -148,6 +158,7 @@ func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { *x = GoTest_KIND(value) return nil } +func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } type MyMessage_Color int32 @@ -184,6 +195,7 @@ func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { *x = MyMessage_Color(value) return nil } +func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } type DefaultsMessage_DefaultsEnum int32 @@ -220,6 +232,9 @@ func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { *x = DefaultsMessage_DefaultsEnum(value) return nil } +func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{16, 0} +} type Defaults_Color int32 @@ -256,6 +271,7 @@ func (x *Defaults_Color) UnmarshalJSON(data []byte) error { *x = Defaults_Color(value) return nil } +func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} } type RepeatedEnum_Color int32 @@ -286,15 +302,17 @@ func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { *x = RepeatedEnum_Color(value) return nil } +func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} } type GoEnum struct { Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} +func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *GoEnum) GetFoo() FOO { if m != nil && m.Foo != nil { @@ -309,9 +327,10 @@ type GoTestField struct { XXX_unrecognized []byte `json:"-"` } -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} +func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *GoTestField) GetLabel() string { if m != nil && m.Label != nil { @@ -337,82 +356,83 @@ type GoTest struct { RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 
`protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"` // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte 
`protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"` // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + F_BoolOptional *bool 
`protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"` // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 
`protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` // Packed repeated fields (no string or bytes). 
- F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` + 
F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} +func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } const Default_GoTest_F_BoolDefaulted bool = true const Default_GoTest_F_Int32Defaulted int32 = 32 @@ -940,9 +960,10 @@ type GoTest_RequiredGroup struct { XXX_unrecognized []byte `json:"-"` } -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} +func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } func (m *GoTest_RequiredGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { @@ -956,9 +977,10 @@ type GoTest_RepeatedGroup struct { XXX_unrecognized []byte `json:"-"` } -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } func (m *GoTest_RepeatedGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { @@ -972,9 +994,10 @@ type GoTest_OptionalGroup struct { XXX_unrecognized []byte `json:"-"` } -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} +func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} } func (m *GoTest_OptionalGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { @@ -983,21 +1006,59 @@ func (m *GoTest_OptionalGroup) GetRequiredField() string { return "" } +// For testing a group containing a required field. 
+type GoTestRequiredGroupField struct { + Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} } +func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField) ProtoMessage() {} +func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group { + if m != nil { + return m.Group + } + return nil +} + +type GoTestRequiredGroupField_Group struct { + Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} } +func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField_Group) ProtoMessage() {} +func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 0} +} + +func (m *GoTestRequiredGroupField_Group) GetField() int32 { + if m != nil && m.Field != nil { + return *m.Field + } + return 0 +} + // For testing skipping of unrecognized fields. // Numbers are all big, larger than tag numbers in GoTestField, // the message used in the corresponding test. type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} +func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *GoSkipTest) GetSkipInt32() int32 { if m != nil && m.SkipInt32 != nil { @@ -1035,14 +1096,15 @@ func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { } type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` + GroupString *string 
`protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { if m != nil && m.GroupInt32 != nil { @@ -1065,9 +1127,10 @@ type NonPackedTest struct { XXX_unrecognized []byte `json:"-"` } -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} +func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *NonPackedTest) GetA() []int32 { if m != nil { @@ -1081,9 +1144,10 @@ type PackedTest struct { XXX_unrecognized []byte `json:"-"` } -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} +func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *PackedTest) GetB() []int32 { if m != nil { @@ -1094,13 +1158,14 @@ func (m *PackedTest) GetB() []int32 { type MaxTag struct { // Maximum possible tag number. 
- LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} +func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *MaxTag) GetLastField() string { if m != nil && m.LastField != nil { @@ -1115,9 +1180,10 @@ type OldMessage struct { XXX_unrecognized []byte `json:"-"` } -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} +func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *OldMessage) GetNested() *OldMessage_Nested { if m != nil { @@ -1138,9 +1204,10 @@ type OldMessage_Nested struct { XXX_unrecognized []byte `json:"-"` } -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} +func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } func (m *OldMessage_Nested) GetName() string { if m != nil && m.Name != nil { @@ -1158,9 +1225,10 @@ type NewMessage struct { XXX_unrecognized []byte `json:"-"` } -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} +func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *NewMessage) GetNested() *NewMessage_Nested { if m != nil { @@ -1178,13 +1246,14 @@ func (m *NewMessage) GetNum() int64 { type NewMessage_Nested struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} +func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } func (m *NewMessage_Nested) GetName() string { if m != nil && m.Name != nil { @@ -1207,9 +1276,10 @@ type InnerMessage struct { XXX_unrecognized []byte `json:"-"` } -func (m 
*InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} +func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } const Default_InnerMessage_Port int32 = 4000 @@ -1235,16 +1305,26 @@ func (m *InnerMessage) GetConnected() bool { } type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_unrecognized []byte `json:"-"` + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} +func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +var extRange_OtherMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherMessage +} func (m *OtherMessage) GetKey() int64 { if m != nil && m.Key != nil { @@ -1274,26 +1354,45 @@ func (m *OtherMessage) GetInner() *InnerMessage { return nil } +type RequiredInnerMessage struct { + LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } +func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } +func (*RequiredInnerMessage) ProtoMessage() {} +func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { + if m != nil { + return m.LeoFinallyWonAnOscar + } + return nil +} + type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" 
json:"somegroup,omitempty"` + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` // This field becomes [][]byte in the generated code. - RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} +func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } var extRange_MyMessage = []proto.ExtensionRange{ {100, 536870911}, @@ -1302,12 +1401,6 @@ var extRange_MyMessage = []proto.ExtensionRange{ func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MyMessage } -func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} func (m *MyMessage) GetCount() int32 { if m != nil && m.Count != nil { @@ -1351,6 +1444,13 @@ func (m *MyMessage) GetOthers() []*OtherMessage { return nil } +func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { + if m != nil { + return m.WeMustGoDeeper + } + return nil +} + func (m *MyMessage) GetRepInner() []*InnerMessage { if m != nil { return m.RepInner @@ -1387,13 +1487,14 @@ func (m *MyMessage) GetBigfloat() float64 { } type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} +func (*MyMessage_SomeGroup) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{13, 0} } func (m *MyMessage_SomeGroup) GetGroupField() int32 { if m != nil && m.GroupField != nil { @@ -1407,9 +1508,10 @@ type Ext struct { XXX_unrecognized []byte `json:"-"` } -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} +func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *Ext) GetData() string { if m != nil && m.Data != nil { @@ -1424,6 +1526,7 @@ var E_Ext_More = &proto.ExtensionDesc{ Field: 103, Name: "testdata.Ext.more", Tag: "bytes,103,opt,name=more", + Filename: "test.proto", } var E_Ext_Text = &proto.ExtensionDesc{ @@ -1432,6 +1535,7 @@ var E_Ext_Text = &proto.ExtensionDesc{ Field: 104, Name: "testdata.Ext.text", Tag: "bytes,104,opt,name=text", + Filename: "test.proto", } var E_Ext_Number = &proto.ExtensionDesc{ @@ -1440,16 +1544,51 @@ var E_Ext_Number = &proto.ExtensionDesc{ Field: 105, Name: "testdata.Ext.number", Tag: "varint,105,opt,name=number", + Filename: "test.proto", +} + +type ComplexExtension struct { + First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"` + Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` + Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } +func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } +func (*ComplexExtension) ProtoMessage() {} +func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *ComplexExtension) GetFirst() int32 { + if m != nil && m.First != nil { + return *m.First + } + return 0 +} + +func (m *ComplexExtension) GetSecond() int32 { + if m != nil && m.Second != nil { + return *m.Second + } + return 0 +} + +func (m *ComplexExtension) GetThird() []int32 { + if m != nil { + return m.Third + } + return nil } type DefaultsMessage struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } -func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } -func (*DefaultsMessage) ProtoMessage() {} +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} +func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } var extRange_DefaultsMessage = []proto.ExtensionRange{ {100, 536870911}, @@ -1458,33 +1597,28 @@ var extRange_DefaultsMessage = []proto.ExtensionRange{ func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { return extRange_DefaultsMessage } -func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} type MyMessageSet struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { 
return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} +func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *MyMessageSet) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(m.ExtensionMap()) + return proto.MarshalMessageSet(&m.XXX_InternalExtensions) } func (m *MyMessageSet) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) + return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) } func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(m.XXX_extensions) + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) } func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) } // ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler @@ -1498,29 +1632,25 @@ var extRange_MyMessageSet = []proto.ExtensionRange{ func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MyMessageSet } -func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} type Empty struct { XXX_unrecognized []byte `json:"-"` } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep,name=Message" json:"message,omitempty"` + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} +func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *MessageList) GetMessage() []*MessageList_Message { if m != nil { @@ -1535,9 +1665,10 @@ type MessageList_Message struct { XXX_unrecognized []byte `json:"-"` } -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} +func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } func (m *MessageList_Message) GetName() string { if m != nil && m.Name != nil { @@ -1554,14 +1685,15 @@ func (m *MessageList_Message) GetCount() int32 { } type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` - 
BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} +func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *Strings) GetStringField() string { if m != nil && m.StringField != nil { @@ -1580,34 +1712,35 @@ func (m *Strings) GetBytesField() []byte { type Defaults struct { // Default-valued fields of all basic types. // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,name=F_String,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 
`protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` // More fields with crazy defaults. - F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,def=nan" json:"F_Nan,omitempty"` + F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"` // Sub-message. Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` // Redundant but explicit defaults. - StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` + StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} +func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } const Default_Defaults_F_Bool bool = true const Default_Defaults_F_Int32 int32 = 32 @@ -1768,9 +1901,10 @@ type SubDefaults struct { XXX_unrecognized []byte `json:"-"` } -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} +func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } const Default_SubDefaults_N int64 = 7 @@ -1786,9 +1920,10 @@ type RepeatedEnum struct { XXX_unrecognized []byte `json:"-"` } -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} +func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { if m != nil { @@ -1799,18 +1934,19 @@ func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { type MoreRepeated struct { Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` - Int64SPacked []int64 
`protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} +func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *MoreRepeated) GetBools() []bool { if m != nil { @@ -1862,13 +1998,14 @@ func (m *MoreRepeated) GetFixeds() []uint32 { } type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt,name=G" json:"g,omitempty"` + G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} +func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } func (m *GroupOld) GetG() *GroupOld_G { if m != nil { @@ -1882,9 +2019,10 @@ type GroupOld_G struct { XXX_unrecognized []byte `json:"-"` } -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} +func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} } func (m *GroupOld_G) GetX() int32 { if m != nil && m.X != nil { @@ -1894,13 +2032,14 @@ func (m *GroupOld_G) GetX() int32 { } type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt,name=G" json:"g,omitempty"` + G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} +func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *GroupNew) GetG() *GroupNew_G { if m != nil { @@ -1915,9 +2054,10 @@ type GroupNew_G struct { XXX_unrecognized []byte `json:"-"` } -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} +func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} } func (m 
*GroupNew_G) GetX() int32 { if m != nil && m.X != nil { @@ -1935,12 +2075,14 @@ func (m *GroupNew_G) GetY() int32 { type FloatingPoint struct { F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} +func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } func (m *FloatingPoint) GetF() float64 { if m != nil && m.F != nil { @@ -1949,17 +2091,25 @@ func (m *FloatingPoint) GetF() float64 { return 0 } +func (m *FloatingPoint) GetExact() bool { + if m != nil && m.Exact != nil { + return *m.Exact + } + return false +} + type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` } -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } func (m *MessageWithMap) GetNameMapping() map[int32]string { if m != nil { @@ -1989,8 +2139,592 @@ func (m *MessageWithMap) GetStrToStr() map[string]string { return nil } +type Oneof struct { + // Types that are valid to be assigned to Union: + // *Oneof_F_Bool + // *Oneof_F_Int32 + // *Oneof_F_Int64 + // *Oneof_F_Fixed32 + // *Oneof_F_Fixed64 + // *Oneof_F_Uint32 + // *Oneof_F_Uint64 + // *Oneof_F_Float + // *Oneof_F_Double + // *Oneof_F_String + // *Oneof_F_Bytes + // *Oneof_F_Sint32 + // 
*Oneof_F_Sint64 + // *Oneof_F_Enum + // *Oneof_F_Message + // *Oneof_FGroup + // *Oneof_F_Largest_Tag + Union isOneof_Union `protobuf_oneof:"union"` + // Types that are valid to be assigned to Tormato: + // *Oneof_Value + Tormato isOneof_Tormato `protobuf_oneof:"tormato"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof) Reset() { *m = Oneof{} } +func (m *Oneof) String() string { return proto.CompactTextString(m) } +func (*Oneof) ProtoMessage() {} +func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type isOneof_Union interface { + isOneof_Union() +} +type isOneof_Tormato interface { + isOneof_Tormato() +} + +type Oneof_F_Bool struct { + F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"` +} +type Oneof_F_Int32 struct { + F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"` +} +type Oneof_F_Int64 struct { + F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"` +} +type Oneof_F_Fixed32 struct { + F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"` +} +type Oneof_F_Fixed64 struct { + F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"` +} +type Oneof_F_Uint32 struct { + F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"` +} +type Oneof_F_Uint64 struct { + F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"` +} +type Oneof_F_Float struct { + F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"` +} +type Oneof_F_Double struct { + F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"` +} +type Oneof_F_String struct { + F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"` +} +type Oneof_F_Bytes struct { + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"` +} +type Oneof_F_Sint32 struct { + F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"` +} +type Oneof_F_Sint64 struct { + F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"` +} +type Oneof_F_Enum struct { + F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.MyMessage_Color,oneof"` +} +type Oneof_F_Message struct { + F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"` +} +type Oneof_FGroup struct { + FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` +} +type Oneof_F_Largest_Tag struct { + F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"` +} +type Oneof_Value struct { + Value int32 `protobuf:"varint,100,opt,name=value,oneof"` +} + +func (*Oneof_F_Bool) isOneof_Union() {} +func (*Oneof_F_Int32) isOneof_Union() {} +func (*Oneof_F_Int64) isOneof_Union() {} +func (*Oneof_F_Fixed32) isOneof_Union() {} +func (*Oneof_F_Fixed64) isOneof_Union() {} +func (*Oneof_F_Uint32) isOneof_Union() {} +func (*Oneof_F_Uint64) isOneof_Union() {} +func (*Oneof_F_Float) isOneof_Union() {} +func (*Oneof_F_Double) isOneof_Union() {} +func (*Oneof_F_String) isOneof_Union() {} +func (*Oneof_F_Bytes) isOneof_Union() {} +func (*Oneof_F_Sint32) isOneof_Union() {} +func (*Oneof_F_Sint64) isOneof_Union() {} +func (*Oneof_F_Enum) isOneof_Union() {} +func (*Oneof_F_Message) isOneof_Union() {} +func (*Oneof_FGroup) isOneof_Union() {} +func (*Oneof_F_Largest_Tag) isOneof_Union() {} +func (*Oneof_Value) isOneof_Tormato() {} + +func (m *Oneof) GetUnion() isOneof_Union { + if m != nil { + return m.Union + } + return nil +} +func 
(m *Oneof) GetTormato() isOneof_Tormato { + if m != nil { + return m.Tormato + } + return nil +} + +func (m *Oneof) GetF_Bool() bool { + if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { + return x.F_Bool + } + return false +} + +func (m *Oneof) GetF_Int32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { + return x.F_Int32 + } + return 0 +} + +func (m *Oneof) GetF_Int64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { + return x.F_Int64 + } + return 0 +} + +func (m *Oneof) GetF_Fixed32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { + return x.F_Fixed32 + } + return 0 +} + +func (m *Oneof) GetF_Fixed64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { + return x.F_Fixed64 + } + return 0 +} + +func (m *Oneof) GetF_Uint32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { + return x.F_Uint32 + } + return 0 +} + +func (m *Oneof) GetF_Uint64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok { + return x.F_Uint64 + } + return 0 +} + +func (m *Oneof) GetF_Float() float32 { + if x, ok := m.GetUnion().(*Oneof_F_Float); ok { + return x.F_Float + } + return 0 +} + +func (m *Oneof) GetF_Double() float64 { + if x, ok := m.GetUnion().(*Oneof_F_Double); ok { + return x.F_Double + } + return 0 +} + +func (m *Oneof) GetF_String() string { + if x, ok := m.GetUnion().(*Oneof_F_String); ok { + return x.F_String + } + return "" +} + +func (m *Oneof) GetF_Bytes() []byte { + if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { + return x.F_Bytes + } + return nil +} + +func (m *Oneof) GetF_Sint32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { + return x.F_Sint32 + } + return 0 +} + +func (m *Oneof) GetF_Sint64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { + return x.F_Sint64 + } + return 0 +} + +func (m *Oneof) GetF_Enum() MyMessage_Color { + if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { + return x.F_Enum + } + return MyMessage_RED +} + +func (m *Oneof) GetF_Message() *GoTestField { + if x, ok := m.GetUnion().(*Oneof_F_Message); ok { + return x.F_Message + } + return nil +} + +func (m *Oneof) GetFGroup() *Oneof_F_Group { + if x, ok := m.GetUnion().(*Oneof_FGroup); ok { + return x.FGroup + } + return nil +} + +func (m *Oneof) GetF_Largest_Tag() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { + return x.F_Largest_Tag + } + return 0 +} + +func (m *Oneof) GetValue() int32 { + if x, ok := m.GetTormato().(*Oneof_Value); ok { + return x.Value + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ + (*Oneof_F_Bool)(nil), + (*Oneof_F_Int32)(nil), + (*Oneof_F_Int64)(nil), + (*Oneof_F_Fixed32)(nil), + (*Oneof_F_Fixed64)(nil), + (*Oneof_F_Uint32)(nil), + (*Oneof_F_Uint64)(nil), + (*Oneof_F_Float)(nil), + (*Oneof_F_Double)(nil), + (*Oneof_F_String)(nil), + (*Oneof_F_Bytes)(nil), + (*Oneof_F_Sint32)(nil), + (*Oneof_F_Sint64)(nil), + (*Oneof_F_Enum)(nil), + (*Oneof_F_Message)(nil), + (*Oneof_FGroup)(nil), + (*Oneof_F_Largest_Tag)(nil), + (*Oneof_Value)(nil), + } +} + +func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + t := uint64(0) + if x.F_Bool { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Oneof_F_Int32: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + b.EncodeVarint(4<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(x.F_Fixed32)) + case *Oneof_F_Fixed64: + b.EncodeVarint(5<<3 | proto.WireFixed64) + b.EncodeFixed64(uint64(x.F_Fixed64)) + case *Oneof_F_Uint32: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + b.EncodeVarint(7<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + b.EncodeVarint(8<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) + case *Oneof_F_Double: + b.EncodeVarint(9<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.F_Double)) + case *Oneof_F_String: + b.EncodeVarint(10<<3 | proto.WireBytes) + b.EncodeStringBytes(x.F_String) + case *Oneof_F_Bytes: + b.EncodeVarint(11<<3 | proto.WireBytes) + b.EncodeRawBytes(x.F_Bytes) + case *Oneof_F_Sint32: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.F_Sint32)) + case *Oneof_F_Sint64: + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeZigzag64(uint64(x.F_Sint64)) + case *Oneof_F_Enum: + b.EncodeVarint(14<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.F_Message); err != nil { + return err + } + case *Oneof_FGroup: + b.EncodeVarint(16<<3 | proto.WireStartGroup) + if err := b.Marshal(x.FGroup); err != nil { + return err + } + b.EncodeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + b.EncodeVarint(536870911<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + return fmt.Errorf("Oneof.Union has unexpected type %T", x) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + b.EncodeVarint(100<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Value)) + case nil: + default: + return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) + } + return nil +} + +func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Oneof) + switch tag { + case 1: // union.F_Bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Bool{x != 0} + return true, err + case 2: // union.F_Int32 + if wire != proto.WireVarint { + 
return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int32{int32(x)} + return true, err + case 3: // union.F_Int64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int64{int64(x)} + return true, err + case 4: // union.F_Fixed32 + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Fixed32{uint32(x)} + return true, err + case 5: // union.F_Fixed64 + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Fixed64{x} + return true, err + case 6: // union.F_Uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint32{uint32(x)} + return true, err + case 7: // union.F_Uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint64{x} + return true, err + case 8: // union.F_Float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} + return true, err + case 9: // union.F_Double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Double{math.Float64frombits(x)} + return true, err + case 10: // union.F_String + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Oneof_F_String{x} + return true, err + case 11: // union.F_Bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Oneof_F_Bytes{x} + return true, err + case 12: // union.F_Sint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Oneof_F_Sint32{int32(x)} + return true, err + case 13: // union.F_Sint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag64() + m.Union = &Oneof_F_Sint64{int64(x)} + return true, err + case 14: // union.F_Enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Enum{MyMessage_Color(x)} + return true, err + case 15: // union.F_Message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GoTestField) + err := b.DecodeMessage(msg) + m.Union = &Oneof_F_Message{msg} + return true, err + case 16: // union.f_group + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Oneof_F_Group) + err := b.DecodeGroup(msg) + m.Union = &Oneof_FGroup{msg} + return true, err + case 536870911: // union.F_Largest_Tag + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Largest_Tag{int32(x)} + return true, err + case 100: // tormato.value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Tormato = &Oneof_Value{int32(x)} + return true, err + default: + return false, nil + } +} + +func _Oneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + n += proto.SizeVarint(1<<3 | 
proto.WireVarint) + n += 1 + case *Oneof_F_Int32: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + n += proto.SizeVarint(4<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Fixed64: + n += proto.SizeVarint(5<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_Uint32: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + n += proto.SizeVarint(7<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + n += proto.SizeVarint(8<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Double: + n += proto.SizeVarint(9<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_String: + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_String))) + n += len(x.F_String) + case *Oneof_F_Bytes: + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_Bytes))) + n += len(x.F_Bytes) + case *Oneof_F_Sint32: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) + case *Oneof_F_Sint64: + n += proto.SizeVarint(13<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) + case *Oneof_F_Enum: + n += proto.SizeVarint(14<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + s := proto.Size(x.F_Message) + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Oneof_FGroup: + n += proto.SizeVarint(16<<3 | proto.WireStartGroup) + n += proto.Size(x.FGroup) + n += proto.SizeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + n += proto.SizeVarint(536870911<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + n += proto.SizeVarint(100<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Value)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oneof_F_Group struct { + X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } +func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } +func (*Oneof_F_Group) ProtoMessage() {} +func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29, 0} } + +func (m *Oneof_F_Group) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + type Communique struct { - MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry" json:"make_me_cry,omitempty"` + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` // This is a oneof, called "union". 
// // Types that are valid to be assigned to Union: @@ -2004,9 +2738,10 @@ type Communique struct { XXX_unrecognized []byte `json:"-"` } -func (m *Communique) Reset() { *m = Communique{} } -func (m *Communique) String() string { return proto.CompactTextString(m) } -func (*Communique) ProtoMessage() {} +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } type isCommunique_Union interface { isCommunique_Union() @@ -2022,7 +2757,7 @@ type Communique_Data struct { Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` } type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,oneof"` + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` } type Communique_Col struct { Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` @@ -2095,8 +2830,8 @@ func (m *Communique) GetMsg() *Strings { } // XXX_OneofFuncs is for the internal use of the proto package. -func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, []interface{}{ +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ (*Communique_Number)(nil), (*Communique_Name)(nil), (*Communique_Data)(nil), @@ -2188,12 +2923,64 @@ func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buf } } +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Col: + n += proto.SizeVarint(9<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Col)) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + var E_Greeting = &proto.ExtensionDesc{ ExtendedType: (*MyMessage)(nil), ExtensionType: ([]string)(nil), Field: 106, Name: "testdata.greeting", Tag: "bytes,106,rep,name=greeting", + Filename: "test.proto", +} + +var E_Complex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: (*ComplexExtension)(nil), + Field: 200, + Name: "testdata.complex", + Tag: "bytes,200,opt,name=complex", + Filename: "test.proto", +} + +var E_RComplex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: ([]*ComplexExtension)(nil), + Field: 201, + Name: "testdata.r_complex", + Tag: "bytes,201,rep,name=r_complex,json=rComplex", + Filename: "test.proto", } var 
E_NoDefaultDouble = &proto.ExtensionDesc{ @@ -2201,7 +2988,8 @@ var E_NoDefaultDouble = &proto.ExtensionDesc{ ExtensionType: (*float64)(nil), Field: 101, Name: "testdata.no_default_double", - Tag: "fixed64,101,opt,name=no_default_double", + Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", + Filename: "test.proto", } var E_NoDefaultFloat = &proto.ExtensionDesc{ @@ -2209,7 +2997,8 @@ var E_NoDefaultFloat = &proto.ExtensionDesc{ ExtensionType: (*float32)(nil), Field: 102, Name: "testdata.no_default_float", - Tag: "fixed32,102,opt,name=no_default_float", + Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", + Filename: "test.proto", } var E_NoDefaultInt32 = &proto.ExtensionDesc{ @@ -2217,7 +3006,8 @@ var E_NoDefaultInt32 = &proto.ExtensionDesc{ ExtensionType: (*int32)(nil), Field: 103, Name: "testdata.no_default_int32", - Tag: "varint,103,opt,name=no_default_int32", + Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", + Filename: "test.proto", } var E_NoDefaultInt64 = &proto.ExtensionDesc{ @@ -2225,7 +3015,8 @@ var E_NoDefaultInt64 = &proto.ExtensionDesc{ ExtensionType: (*int64)(nil), Field: 104, Name: "testdata.no_default_int64", - Tag: "varint,104,opt,name=no_default_int64", + Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", + Filename: "test.proto", } var E_NoDefaultUint32 = &proto.ExtensionDesc{ @@ -2233,7 +3024,8 @@ var E_NoDefaultUint32 = &proto.ExtensionDesc{ ExtensionType: (*uint32)(nil), Field: 105, Name: "testdata.no_default_uint32", - Tag: "varint,105,opt,name=no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", + Filename: "test.proto", } var E_NoDefaultUint64 = &proto.ExtensionDesc{ @@ -2241,7 +3033,8 @@ var E_NoDefaultUint64 = &proto.ExtensionDesc{ ExtensionType: (*uint64)(nil), Field: 106, Name: "testdata.no_default_uint64", - Tag: "varint,106,opt,name=no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", + Filename: "test.proto", } var E_NoDefaultSint32 = &proto.ExtensionDesc{ @@ -2249,7 +3042,8 @@ var E_NoDefaultSint32 = &proto.ExtensionDesc{ ExtensionType: (*int32)(nil), Field: 107, Name: "testdata.no_default_sint32", - Tag: "zigzag32,107,opt,name=no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", + Filename: "test.proto", } var E_NoDefaultSint64 = &proto.ExtensionDesc{ @@ -2257,7 +3051,8 @@ var E_NoDefaultSint64 = &proto.ExtensionDesc{ ExtensionType: (*int64)(nil), Field: 108, Name: "testdata.no_default_sint64", - Tag: "zigzag64,108,opt,name=no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", + Filename: "test.proto", } var E_NoDefaultFixed32 = &proto.ExtensionDesc{ @@ -2265,7 +3060,8 @@ var E_NoDefaultFixed32 = &proto.ExtensionDesc{ ExtensionType: (*uint32)(nil), Field: 109, Name: "testdata.no_default_fixed32", - Tag: "fixed32,109,opt,name=no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", + Filename: "test.proto", } var E_NoDefaultFixed64 = &proto.ExtensionDesc{ @@ -2273,7 +3069,8 @@ var E_NoDefaultFixed64 = &proto.ExtensionDesc{ ExtensionType: (*uint64)(nil), Field: 110, Name: "testdata.no_default_fixed64", - Tag: "fixed64,110,opt,name=no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", + Filename: "test.proto", } var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ @@ -2281,7 +3078,8 @@ var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ ExtensionType: (*int32)(nil), Field: 111, Name: 
"testdata.no_default_sfixed32", - Tag: "fixed32,111,opt,name=no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", + Filename: "test.proto", } var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ @@ -2289,7 +3087,8 @@ var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ ExtensionType: (*int64)(nil), Field: 112, Name: "testdata.no_default_sfixed64", - Tag: "fixed64,112,opt,name=no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", + Filename: "test.proto", } var E_NoDefaultBool = &proto.ExtensionDesc{ @@ -2297,7 +3096,8 @@ var E_NoDefaultBool = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 113, Name: "testdata.no_default_bool", - Tag: "varint,113,opt,name=no_default_bool", + Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", + Filename: "test.proto", } var E_NoDefaultString = &proto.ExtensionDesc{ @@ -2305,7 +3105,8 @@ var E_NoDefaultString = &proto.ExtensionDesc{ ExtensionType: (*string)(nil), Field: 114, Name: "testdata.no_default_string", - Tag: "bytes,114,opt,name=no_default_string", + Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", + Filename: "test.proto", } var E_NoDefaultBytes = &proto.ExtensionDesc{ @@ -2313,7 +3114,8 @@ var E_NoDefaultBytes = &proto.ExtensionDesc{ ExtensionType: ([]byte)(nil), Field: 115, Name: "testdata.no_default_bytes", - Tag: "bytes,115,opt,name=no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", + Filename: "test.proto", } var E_NoDefaultEnum = &proto.ExtensionDesc{ @@ -2321,7 +3123,8 @@ var E_NoDefaultEnum = &proto.ExtensionDesc{ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), Field: 116, Name: "testdata.no_default_enum", - Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", + Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum", + Filename: "test.proto", } var E_DefaultDouble = &proto.ExtensionDesc{ @@ -2329,7 +3132,8 @@ var E_DefaultDouble = &proto.ExtensionDesc{ ExtensionType: (*float64)(nil), Field: 201, Name: "testdata.default_double", - Tag: "fixed64,201,opt,name=default_double,def=3.1415", + Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", + Filename: "test.proto", } var E_DefaultFloat = &proto.ExtensionDesc{ @@ -2337,7 +3141,8 @@ var E_DefaultFloat = &proto.ExtensionDesc{ ExtensionType: (*float32)(nil), Field: 202, Name: "testdata.default_float", - Tag: "fixed32,202,opt,name=default_float,def=3.14", + Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", + Filename: "test.proto", } var E_DefaultInt32 = &proto.ExtensionDesc{ @@ -2345,7 +3150,8 @@ var E_DefaultInt32 = &proto.ExtensionDesc{ ExtensionType: (*int32)(nil), Field: 203, Name: "testdata.default_int32", - Tag: "varint,203,opt,name=default_int32,def=42", + Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", + Filename: "test.proto", } var E_DefaultInt64 = &proto.ExtensionDesc{ @@ -2353,7 +3159,8 @@ var E_DefaultInt64 = &proto.ExtensionDesc{ ExtensionType: (*int64)(nil), Field: 204, Name: "testdata.default_int64", - Tag: "varint,204,opt,name=default_int64,def=43", + Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", + Filename: "test.proto", } var E_DefaultUint32 = &proto.ExtensionDesc{ @@ -2361,7 +3168,8 @@ var E_DefaultUint32 = &proto.ExtensionDesc{ ExtensionType: (*uint32)(nil), Field: 205, Name: "testdata.default_uint32", - Tag: "varint,205,opt,name=default_uint32,def=44", + Tag: 
"varint,205,opt,name=default_uint32,json=defaultUint32,def=44", + Filename: "test.proto", } var E_DefaultUint64 = &proto.ExtensionDesc{ @@ -2369,7 +3177,8 @@ var E_DefaultUint64 = &proto.ExtensionDesc{ ExtensionType: (*uint64)(nil), Field: 206, Name: "testdata.default_uint64", - Tag: "varint,206,opt,name=default_uint64,def=45", + Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", + Filename: "test.proto", } var E_DefaultSint32 = &proto.ExtensionDesc{ @@ -2377,7 +3186,8 @@ var E_DefaultSint32 = &proto.ExtensionDesc{ ExtensionType: (*int32)(nil), Field: 207, Name: "testdata.default_sint32", - Tag: "zigzag32,207,opt,name=default_sint32,def=46", + Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", + Filename: "test.proto", } var E_DefaultSint64 = &proto.ExtensionDesc{ @@ -2385,7 +3195,8 @@ var E_DefaultSint64 = &proto.ExtensionDesc{ ExtensionType: (*int64)(nil), Field: 208, Name: "testdata.default_sint64", - Tag: "zigzag64,208,opt,name=default_sint64,def=47", + Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", + Filename: "test.proto", } var E_DefaultFixed32 = &proto.ExtensionDesc{ @@ -2393,7 +3204,8 @@ var E_DefaultFixed32 = &proto.ExtensionDesc{ ExtensionType: (*uint32)(nil), Field: 209, Name: "testdata.default_fixed32", - Tag: "fixed32,209,opt,name=default_fixed32,def=48", + Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", + Filename: "test.proto", } var E_DefaultFixed64 = &proto.ExtensionDesc{ @@ -2401,7 +3213,8 @@ var E_DefaultFixed64 = &proto.ExtensionDesc{ ExtensionType: (*uint64)(nil), Field: 210, Name: "testdata.default_fixed64", - Tag: "fixed64,210,opt,name=default_fixed64,def=49", + Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", + Filename: "test.proto", } var E_DefaultSfixed32 = &proto.ExtensionDesc{ @@ -2409,7 +3222,8 @@ var E_DefaultSfixed32 = &proto.ExtensionDesc{ ExtensionType: (*int32)(nil), Field: 211, Name: "testdata.default_sfixed32", - Tag: "fixed32,211,opt,name=default_sfixed32,def=50", + Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", + Filename: "test.proto", } var E_DefaultSfixed64 = &proto.ExtensionDesc{ @@ -2417,7 +3231,8 @@ var E_DefaultSfixed64 = &proto.ExtensionDesc{ ExtensionType: (*int64)(nil), Field: 212, Name: "testdata.default_sfixed64", - Tag: "fixed64,212,opt,name=default_sfixed64,def=51", + Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", + Filename: "test.proto", } var E_DefaultBool = &proto.ExtensionDesc{ @@ -2425,7 +3240,8 @@ var E_DefaultBool = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 213, Name: "testdata.default_bool", - Tag: "varint,213,opt,name=default_bool,def=1", + Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", + Filename: "test.proto", } var E_DefaultString = &proto.ExtensionDesc{ @@ -2433,7 +3249,8 @@ var E_DefaultString = &proto.ExtensionDesc{ ExtensionType: (*string)(nil), Field: 214, Name: "testdata.default_string", - Tag: "bytes,214,opt,name=default_string,def=Hello, string", + Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string", + Filename: "test.proto", } var E_DefaultBytes = &proto.ExtensionDesc{ @@ -2441,7 +3258,8 @@ var E_DefaultBytes = &proto.ExtensionDesc{ ExtensionType: ([]byte)(nil), Field: 215, Name: "testdata.default_bytes", - Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", + Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", + Filename: "test.proto", } var E_DefaultEnum = 
&proto.ExtensionDesc{ @@ -2449,7 +3267,8 @@ var E_DefaultEnum = &proto.ExtensionDesc{ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), Field: 216, Name: "testdata.default_enum", - Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", + Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", + Filename: "test.proto", } var E_X201 = &proto.ExtensionDesc{ @@ -2458,6 +3277,7 @@ var E_X201 = &proto.ExtensionDesc{ Field: 201, Name: "testdata.x201", Tag: "bytes,201,opt,name=x201", + Filename: "test.proto", } var E_X202 = &proto.ExtensionDesc{ @@ -2466,6 +3286,7 @@ var E_X202 = &proto.ExtensionDesc{ Field: 202, Name: "testdata.x202", Tag: "bytes,202,opt,name=x202", + Filename: "test.proto", } var E_X203 = &proto.ExtensionDesc{ @@ -2474,6 +3295,7 @@ var E_X203 = &proto.ExtensionDesc{ Field: 203, Name: "testdata.x203", Tag: "bytes,203,opt,name=x203", + Filename: "test.proto", } var E_X204 = &proto.ExtensionDesc{ @@ -2482,6 +3304,7 @@ var E_X204 = &proto.ExtensionDesc{ Field: 204, Name: "testdata.x204", Tag: "bytes,204,opt,name=x204", + Filename: "test.proto", } var E_X205 = &proto.ExtensionDesc{ @@ -2490,6 +3313,7 @@ var E_X205 = &proto.ExtensionDesc{ Field: 205, Name: "testdata.x205", Tag: "bytes,205,opt,name=x205", + Filename: "test.proto", } var E_X206 = &proto.ExtensionDesc{ @@ -2498,6 +3322,7 @@ var E_X206 = &proto.ExtensionDesc{ Field: 206, Name: "testdata.x206", Tag: "bytes,206,opt,name=x206", + Filename: "test.proto", } var E_X207 = &proto.ExtensionDesc{ @@ -2506,6 +3331,7 @@ var E_X207 = &proto.ExtensionDesc{ Field: 207, Name: "testdata.x207", Tag: "bytes,207,opt,name=x207", + Filename: "test.proto", } var E_X208 = &proto.ExtensionDesc{ @@ -2514,6 +3340,7 @@ var E_X208 = &proto.ExtensionDesc{ Field: 208, Name: "testdata.x208", Tag: "bytes,208,opt,name=x208", + Filename: "test.proto", } var E_X209 = &proto.ExtensionDesc{ @@ -2522,6 +3349,7 @@ var E_X209 = &proto.ExtensionDesc{ Field: 209, Name: "testdata.x209", Tag: "bytes,209,opt,name=x209", + Filename: "test.proto", } var E_X210 = &proto.ExtensionDesc{ @@ -2530,6 +3358,7 @@ var E_X210 = &proto.ExtensionDesc{ Field: 210, Name: "testdata.x210", Tag: "bytes,210,opt,name=x210", + Filename: "test.proto", } var E_X211 = &proto.ExtensionDesc{ @@ -2538,6 +3367,7 @@ var E_X211 = &proto.ExtensionDesc{ Field: 211, Name: "testdata.x211", Tag: "bytes,211,opt,name=x211", + Filename: "test.proto", } var E_X212 = &proto.ExtensionDesc{ @@ -2546,6 +3376,7 @@ var E_X212 = &proto.ExtensionDesc{ Field: 212, Name: "testdata.x212", Tag: "bytes,212,opt,name=x212", + Filename: "test.proto", } var E_X213 = &proto.ExtensionDesc{ @@ -2554,6 +3385,7 @@ var E_X213 = &proto.ExtensionDesc{ Field: 213, Name: "testdata.x213", Tag: "bytes,213,opt,name=x213", + Filename: "test.proto", } var E_X214 = &proto.ExtensionDesc{ @@ -2562,6 +3394,7 @@ var E_X214 = &proto.ExtensionDesc{ Field: 214, Name: "testdata.x214", Tag: "bytes,214,opt,name=x214", + Filename: "test.proto", } var E_X215 = &proto.ExtensionDesc{ @@ -2570,6 +3403,7 @@ var E_X215 = &proto.ExtensionDesc{ Field: 215, Name: "testdata.x215", Tag: "bytes,215,opt,name=x215", + Filename: "test.proto", } var E_X216 = &proto.ExtensionDesc{ @@ -2578,6 +3412,7 @@ var E_X216 = &proto.ExtensionDesc{ Field: 216, Name: "testdata.x216", Tag: "bytes,216,opt,name=x216", + Filename: "test.proto", } var E_X217 = &proto.ExtensionDesc{ @@ -2586,6 +3421,7 @@ var E_X217 = &proto.ExtensionDesc{ Field: 217, Name: "testdata.x217", Tag: 
"bytes,217,opt,name=x217", + Filename: "test.proto", } var E_X218 = &proto.ExtensionDesc{ @@ -2594,6 +3430,7 @@ var E_X218 = &proto.ExtensionDesc{ Field: 218, Name: "testdata.x218", Tag: "bytes,218,opt,name=x218", + Filename: "test.proto", } var E_X219 = &proto.ExtensionDesc{ @@ -2602,6 +3439,7 @@ var E_X219 = &proto.ExtensionDesc{ Field: 219, Name: "testdata.x219", Tag: "bytes,219,opt,name=x219", + Filename: "test.proto", } var E_X220 = &proto.ExtensionDesc{ @@ -2610,6 +3448,7 @@ var E_X220 = &proto.ExtensionDesc{ Field: 220, Name: "testdata.x220", Tag: "bytes,220,opt,name=x220", + Filename: "test.proto", } var E_X221 = &proto.ExtensionDesc{ @@ -2618,6 +3457,7 @@ var E_X221 = &proto.ExtensionDesc{ Field: 221, Name: "testdata.x221", Tag: "bytes,221,opt,name=x221", + Filename: "test.proto", } var E_X222 = &proto.ExtensionDesc{ @@ -2626,6 +3466,7 @@ var E_X222 = &proto.ExtensionDesc{ Field: 222, Name: "testdata.x222", Tag: "bytes,222,opt,name=x222", + Filename: "test.proto", } var E_X223 = &proto.ExtensionDesc{ @@ -2634,6 +3475,7 @@ var E_X223 = &proto.ExtensionDesc{ Field: 223, Name: "testdata.x223", Tag: "bytes,223,opt,name=x223", + Filename: "test.proto", } var E_X224 = &proto.ExtensionDesc{ @@ -2642,6 +3484,7 @@ var E_X224 = &proto.ExtensionDesc{ Field: 224, Name: "testdata.x224", Tag: "bytes,224,opt,name=x224", + Filename: "test.proto", } var E_X225 = &proto.ExtensionDesc{ @@ -2650,6 +3493,7 @@ var E_X225 = &proto.ExtensionDesc{ Field: 225, Name: "testdata.x225", Tag: "bytes,225,opt,name=x225", + Filename: "test.proto", } var E_X226 = &proto.ExtensionDesc{ @@ -2658,6 +3502,7 @@ var E_X226 = &proto.ExtensionDesc{ Field: 226, Name: "testdata.x226", Tag: "bytes,226,opt,name=x226", + Filename: "test.proto", } var E_X227 = &proto.ExtensionDesc{ @@ -2666,6 +3511,7 @@ var E_X227 = &proto.ExtensionDesc{ Field: 227, Name: "testdata.x227", Tag: "bytes,227,opt,name=x227", + Filename: "test.proto", } var E_X228 = &proto.ExtensionDesc{ @@ -2674,6 +3520,7 @@ var E_X228 = &proto.ExtensionDesc{ Field: 228, Name: "testdata.x228", Tag: "bytes,228,opt,name=x228", + Filename: "test.proto", } var E_X229 = &proto.ExtensionDesc{ @@ -2682,6 +3529,7 @@ var E_X229 = &proto.ExtensionDesc{ Field: 229, Name: "testdata.x229", Tag: "bytes,229,opt,name=x229", + Filename: "test.proto", } var E_X230 = &proto.ExtensionDesc{ @@ -2690,6 +3538,7 @@ var E_X230 = &proto.ExtensionDesc{ Field: 230, Name: "testdata.x230", Tag: "bytes,230,opt,name=x230", + Filename: "test.proto", } var E_X231 = &proto.ExtensionDesc{ @@ -2698,6 +3547,7 @@ var E_X231 = &proto.ExtensionDesc{ Field: 231, Name: "testdata.x231", Tag: "bytes,231,opt,name=x231", + Filename: "test.proto", } var E_X232 = &proto.ExtensionDesc{ @@ -2706,6 +3556,7 @@ var E_X232 = &proto.ExtensionDesc{ Field: 232, Name: "testdata.x232", Tag: "bytes,232,opt,name=x232", + Filename: "test.proto", } var E_X233 = &proto.ExtensionDesc{ @@ -2714,6 +3565,7 @@ var E_X233 = &proto.ExtensionDesc{ Field: 233, Name: "testdata.x233", Tag: "bytes,233,opt,name=x233", + Filename: "test.proto", } var E_X234 = &proto.ExtensionDesc{ @@ -2722,6 +3574,7 @@ var E_X234 = &proto.ExtensionDesc{ Field: 234, Name: "testdata.x234", Tag: "bytes,234,opt,name=x234", + Filename: "test.proto", } var E_X235 = &proto.ExtensionDesc{ @@ -2730,6 +3583,7 @@ var E_X235 = &proto.ExtensionDesc{ Field: 235, Name: "testdata.x235", Tag: "bytes,235,opt,name=x235", + Filename: "test.proto", } var E_X236 = &proto.ExtensionDesc{ @@ -2738,6 +3592,7 @@ var E_X236 = &proto.ExtensionDesc{ Field: 236, Name: "testdata.x236", Tag: 
"bytes,236,opt,name=x236", + Filename: "test.proto", } var E_X237 = &proto.ExtensionDesc{ @@ -2746,6 +3601,7 @@ var E_X237 = &proto.ExtensionDesc{ Field: 237, Name: "testdata.x237", Tag: "bytes,237,opt,name=x237", + Filename: "test.proto", } var E_X238 = &proto.ExtensionDesc{ @@ -2754,6 +3610,7 @@ var E_X238 = &proto.ExtensionDesc{ Field: 238, Name: "testdata.x238", Tag: "bytes,238,opt,name=x238", + Filename: "test.proto", } var E_X239 = &proto.ExtensionDesc{ @@ -2762,6 +3619,7 @@ var E_X239 = &proto.ExtensionDesc{ Field: 239, Name: "testdata.x239", Tag: "bytes,239,opt,name=x239", + Filename: "test.proto", } var E_X240 = &proto.ExtensionDesc{ @@ -2770,6 +3628,7 @@ var E_X240 = &proto.ExtensionDesc{ Field: 240, Name: "testdata.x240", Tag: "bytes,240,opt,name=x240", + Filename: "test.proto", } var E_X241 = &proto.ExtensionDesc{ @@ -2778,6 +3637,7 @@ var E_X241 = &proto.ExtensionDesc{ Field: 241, Name: "testdata.x241", Tag: "bytes,241,opt,name=x241", + Filename: "test.proto", } var E_X242 = &proto.ExtensionDesc{ @@ -2786,6 +3646,7 @@ var E_X242 = &proto.ExtensionDesc{ Field: 242, Name: "testdata.x242", Tag: "bytes,242,opt,name=x242", + Filename: "test.proto", } var E_X243 = &proto.ExtensionDesc{ @@ -2794,6 +3655,7 @@ var E_X243 = &proto.ExtensionDesc{ Field: 243, Name: "testdata.x243", Tag: "bytes,243,opt,name=x243", + Filename: "test.proto", } var E_X244 = &proto.ExtensionDesc{ @@ -2802,6 +3664,7 @@ var E_X244 = &proto.ExtensionDesc{ Field: 244, Name: "testdata.x244", Tag: "bytes,244,opt,name=x244", + Filename: "test.proto", } var E_X245 = &proto.ExtensionDesc{ @@ -2810,6 +3673,7 @@ var E_X245 = &proto.ExtensionDesc{ Field: 245, Name: "testdata.x245", Tag: "bytes,245,opt,name=x245", + Filename: "test.proto", } var E_X246 = &proto.ExtensionDesc{ @@ -2818,6 +3682,7 @@ var E_X246 = &proto.ExtensionDesc{ Field: 246, Name: "testdata.x246", Tag: "bytes,246,opt,name=x246", + Filename: "test.proto", } var E_X247 = &proto.ExtensionDesc{ @@ -2826,6 +3691,7 @@ var E_X247 = &proto.ExtensionDesc{ Field: 247, Name: "testdata.x247", Tag: "bytes,247,opt,name=x247", + Filename: "test.proto", } var E_X248 = &proto.ExtensionDesc{ @@ -2834,6 +3700,7 @@ var E_X248 = &proto.ExtensionDesc{ Field: 248, Name: "testdata.x248", Tag: "bytes,248,opt,name=x248", + Filename: "test.proto", } var E_X249 = &proto.ExtensionDesc{ @@ -2842,6 +3709,7 @@ var E_X249 = &proto.ExtensionDesc{ Field: 249, Name: "testdata.x249", Tag: "bytes,249,opt,name=x249", + Filename: "test.proto", } var E_X250 = &proto.ExtensionDesc{ @@ -2850,9 +3718,53 @@ var E_X250 = &proto.ExtensionDesc{ Field: 250, Name: "testdata.x250", Tag: "bytes,250,opt,name=x250", + Filename: "test.proto", } func init() { + proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum") + proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField") + proto.RegisterType((*GoTest)(nil), "testdata.GoTest") + proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup") + proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup") + proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup") + proto.RegisterType((*GoTestRequiredGroupField)(nil), "testdata.GoTestRequiredGroupField") + proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "testdata.GoTestRequiredGroupField.Group") + proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest") + proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup") + proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest") + 
proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest") + proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag") + proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage") + proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested") + proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage") + proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested") + proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage") + proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage") + proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage") + proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage") + proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup") + proto.RegisterType((*Ext)(nil), "testdata.Ext") + proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension") + proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage") + proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet") + proto.RegisterType((*Empty)(nil), "testdata.Empty") + proto.RegisterType((*MessageList)(nil), "testdata.MessageList") + proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message") + proto.RegisterType((*Strings)(nil), "testdata.Strings") + proto.RegisterType((*Defaults)(nil), "testdata.Defaults") + proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults") + proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum") + proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated") + proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld") + proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G") + proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew") + proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G") + proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint") + proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap") + proto.RegisterType((*Oneof)(nil), "testdata.Oneof") + proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group") + proto.RegisterType((*Communique)(nil), "testdata.Communique") proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) @@ -2863,6 +3775,8 @@ func init() { proto.RegisterExtension(E_Ext_Text) proto.RegisterExtension(E_Ext_Number) proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_Complex) + proto.RegisterExtension(E_RComplex) proto.RegisterExtension(E_NoDefaultDouble) proto.RegisterExtension(E_NoDefaultFloat) proto.RegisterExtension(E_NoDefaultInt32) @@ -2946,3 +3860,288 @@ func init() { proto.RegisterExtension(E_X249) proto.RegisterExtension(E_X250) } + +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 4453 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xc9, 0x77, 0xdb, 0x48, + 0x7a, 0x37, 0xc0, 0xfd, 0x23, 0x25, 0x42, 0x65, 0xb5, 0x9b, 0x96, 0xbc, 0xc0, 0x9c, 0xe9, 0x6e, + 0x7a, 0xd3, 0x48, 0x20, 0x44, 0xdb, 0x74, 0xa7, 0xdf, 0xf3, 0x42, 0xca, 0x7a, 0x63, 0x89, 0x0a, + 0xa4, 0xee, 0x7e, 0xd3, 0x39, 0xf0, 0x51, 0x22, 0x44, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52, + 0x72, 0xe9, 0x4b, 0x72, 0xcd, 0x76, 0xc9, 0x35, 0xa7, 0x9c, 0x92, 0xbc, 0x97, 0x7f, 0x22, 0xe9, + 0xee, 0x59, 0x7b, 0xd6, 0xac, 0x93, 0x7d, 0x99, 0xec, 0xdb, 0x4c, 0x92, 0x4b, 0xcf, 0xab, 0xaf, + 
0x0a, 0x40, 0x01, 0x24, 0x20, 0xf9, 0x24, 0x56, 0xd5, 0xef, 0xf7, 0xd5, 0xf6, 0xab, 0xef, 0xab, + 0xaf, 0x20, 0x00, 0xc7, 0x9c, 0x38, 0x2b, 0xa3, 0xb1, 0xed, 0xd8, 0x24, 0x4b, 0x7f, 0x77, 0x3b, + 0x4e, 0xa7, 0x7c, 0x1d, 0xd2, 0x1b, 0x76, 0xc3, 0x3a, 0x1a, 0x92, 0xab, 0x90, 0x38, 0xb4, 0xed, + 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x34, 0x5b, 0x2d, 0x83, 0xb6, 0x94, + 0xef, 0x40, 0x7e, 0xc3, 0xde, 0x33, 0x27, 0x4e, 0xb3, 0x6f, 0x0e, 0xba, 0x64, 0x11, 0x52, 0x4f, + 0x3b, 0xfb, 0xe6, 0x00, 0x19, 0x39, 0x83, 0x15, 0x08, 0x81, 0xe4, 0xde, 0xc9, 0xc8, 0x2c, 0xc9, + 0x58, 0x89, 0xbf, 0xcb, 0xbf, 0x72, 0x85, 0x76, 0x42, 0x99, 0xe4, 0x3a, 0x24, 0xbf, 0xdc, 0xb7, + 0xba, 0xbc, 0x97, 0xd7, 0xfc, 0x5e, 0x58, 0xfb, 0xca, 0x97, 0x37, 0xb7, 0x1f, 0x1b, 0x08, 0xa1, + 0xf6, 0xf7, 0x3a, 0xfb, 0x03, 0x6a, 0x4a, 0xa2, 0xf6, 0xb1, 0x40, 0x6b, 0x77, 0x3a, 0xe3, 0xce, + 0xb0, 0x94, 0x50, 0xa5, 0x4a, 0xca, 0x60, 0x05, 0x72, 0x1f, 0xe6, 0x0c, 0xf3, 0xc5, 0x51, 0x7f, + 0x6c, 0x76, 0x71, 0x70, 0xa5, 0xa4, 0x2a, 0x57, 0xf2, 0xd3, 0xf6, 0xb1, 0xd1, 0x08, 0x62, 0x19, + 0x79, 0x64, 0x76, 0x1c, 0x97, 0x9c, 0x52, 0x13, 0xb1, 0x64, 0x01, 0x4b, 0xc9, 0xad, 0x91, 0xd3, + 0xb7, 0xad, 0xce, 0x80, 0x91, 0xd3, 0xaa, 0x14, 0x43, 0x0e, 0x60, 0xc9, 0x9b, 0x50, 0x6c, 0xb6, + 0x1f, 0xda, 0xf6, 0xa0, 0x3d, 0xe6, 0x23, 0x2a, 0x81, 0x2a, 0x57, 0xb2, 0xc6, 0x5c, 0x93, 0xd6, + 0xba, 0xc3, 0x24, 0x15, 0x50, 0x9a, 0xed, 0x4d, 0xcb, 0xa9, 0x6a, 0x3e, 0x30, 0xaf, 0xca, 0x95, + 0x94, 0x31, 0xdf, 0xc4, 0xea, 0x29, 0x64, 0x4d, 0xf7, 0x91, 0x05, 0x55, 0xae, 0x24, 0x18, 0xb2, + 0xa6, 0x7b, 0xc8, 0x5b, 0x40, 0x9a, 0xed, 0x66, 0xff, 0xd8, 0xec, 0x8a, 0x56, 0xe7, 0x54, 0xb9, + 0x92, 0x31, 0x94, 0x26, 0x6f, 0x98, 0x81, 0x16, 0x2d, 0xcf, 0xab, 0x72, 0x25, 0xed, 0xa2, 0x05, + 0xdb, 0x37, 0x60, 0xa1, 0xd9, 0x7e, 0xb7, 0x1f, 0x1c, 0x70, 0x51, 0x95, 0x2b, 0x73, 0x46, 0xb1, + 0xc9, 0xea, 0xa7, 0xb1, 0xa2, 0x61, 0x45, 0x95, 0x2b, 0x49, 0x8e, 0x15, 0xec, 0xe2, 0xec, 0x9a, + 0x03, 0xbb, 0xe3, 0xf8, 0xd0, 0x05, 0x55, 0xae, 0xc8, 0xc6, 0x7c, 0x13, 0xab, 0x83, 0x56, 0x1f, + 0xdb, 0x47, 0xfb, 0x03, 0xd3, 0x87, 0x12, 0x55, 0xae, 0x48, 0x46, 0xb1, 0xc9, 0xea, 0x83, 0xd8, + 0x5d, 0x67, 0xdc, 0xb7, 0x7a, 0x3e, 0xf6, 0x3c, 0xea, 0xb7, 0xd8, 0x64, 0xf5, 0xc1, 0x11, 0x3c, + 0x3c, 0x71, 0xcc, 0x89, 0x0f, 0x35, 0x55, 0xb9, 0x52, 0x30, 0xe6, 0x9b, 0x58, 0x1d, 0xb2, 0x1a, + 0x5a, 0x83, 0x43, 0x55, 0xae, 0x2c, 0x50, 0xab, 0x33, 0xd6, 0x60, 0x37, 0xb4, 0x06, 0x3d, 0x55, + 0xae, 0x10, 0x8e, 0x15, 0xd6, 0x40, 0xd4, 0x0c, 0x13, 0x62, 0x69, 0x51, 0x4d, 0x08, 0x9a, 0x61, + 0x95, 0x41, 0xcd, 0x70, 0xe0, 0x6b, 0x6a, 0x42, 0xd4, 0x4c, 0x08, 0x89, 0x9d, 0x73, 0xe4, 0x05, + 0x35, 0x21, 0x6a, 0x86, 0x23, 0x43, 0x9a, 0xe1, 0xd8, 0xd7, 0xd5, 0x44, 0x50, 0x33, 0x53, 0x68, + 0xd1, 0x72, 0x49, 0x4d, 0x04, 0x35, 0xc3, 0xd1, 0x41, 0xcd, 0x70, 0xf0, 0x45, 0x35, 0x11, 0xd0, + 0x4c, 0x18, 0x2b, 0x1a, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0xb3, 0x73, 0x35, 0xc3, 0xa1, 0xcb, + 0x6a, 0x42, 0xd4, 0x8c, 0x68, 0xd5, 0xd3, 0x0c, 0x87, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0x58, + 0x4f, 0x33, 0x1c, 0x7b, 0x59, 0x4d, 0x04, 0x34, 0xc3, 0xb1, 0xd7, 0x45, 0xcd, 0x70, 0xe8, 0xc7, + 0x92, 0x9a, 0x10, 0x45, 0xc3, 0xa1, 0x37, 0x03, 0xa2, 0xe1, 0xd8, 0x4f, 0x28, 0x56, 0x54, 0x4d, + 0x18, 0x2c, 0xae, 0xc2, 0xa7, 0x14, 0x2c, 0xca, 0x86, 0x83, 0x7d, 0xd9, 0xd8, 0xdc, 0x05, 0x95, + 0xae, 0xa8, 0x92, 0x27, 0x1b, 0xd7, 0x2f, 0x89, 0xb2, 0xf1, 0x80, 0x57, 0xd1, 0xd5, 0x72, 0xd9, + 0x4c, 0x21, 0x6b, 0xba, 0x8f, 0x54, 0x55, 0xc9, 0x97, 0x8d, 0x87, 0x0c, 0xc8, 0xc6, 0xc3, 0x5e, + 0x53, 0x25, 0x51, 0x36, 
0x33, 0xd0, 0xa2, 0xe5, 0xb2, 0x2a, 0x89, 0xb2, 0xf1, 0xd0, 0xa2, 0x6c, + 0x3c, 0xf0, 0x17, 0x54, 0x49, 0x90, 0xcd, 0x34, 0x56, 0x34, 0xfc, 0x45, 0x55, 0x12, 0x64, 0x13, + 0x9c, 0x1d, 0x93, 0x8d, 0x07, 0x7d, 0x43, 0x95, 0x7c, 0xd9, 0x04, 0xad, 0x72, 0xd9, 0x78, 0xd0, + 0x37, 0x55, 0x49, 0x90, 0x4d, 0x10, 0xcb, 0x65, 0xe3, 0x61, 0xdf, 0xc2, 0xf8, 0xe6, 0xca, 0xc6, + 0xc3, 0x0a, 0xb2, 0xf1, 0xa0, 0xbf, 0x43, 0x63, 0xa1, 0x27, 0x1b, 0x0f, 0x2a, 0xca, 0xc6, 0xc3, + 0xfe, 0x2e, 0xc5, 0xfa, 0xb2, 0x99, 0x06, 0x8b, 0xab, 0xf0, 0x7b, 0x14, 0xec, 0xcb, 0xc6, 0x03, + 0xaf, 0xe0, 0x20, 0xa8, 0x6c, 0xba, 0xe6, 0x61, 0xe7, 0x68, 0x40, 0x25, 0x56, 0xa1, 0xba, 0xa9, + 0x27, 0x9d, 0xf1, 0x91, 0x49, 0x47, 0x62, 0xdb, 0x83, 0xc7, 0x6e, 0x1b, 0x59, 0xa1, 0xc6, 0x99, + 0x7c, 0x7c, 0xc2, 0x75, 0xaa, 0x9f, 0xba, 0x5c, 0xd5, 0x8c, 0x22, 0xd3, 0xd0, 0x34, 0xbe, 0xa6, + 0x0b, 0xf8, 0x1b, 0x54, 0x45, 0x75, 0xb9, 0xa6, 0x33, 0x7c, 0x4d, 0xf7, 0xf1, 0x55, 0x38, 0xef, + 0x4b, 0xc9, 0x67, 0xdc, 0xa4, 0x5a, 0xaa, 0x27, 0xaa, 0xda, 0xaa, 0xb1, 0xe0, 0x0a, 0x6a, 0x16, + 0x29, 0xd0, 0xcd, 0x2d, 0x2a, 0xa9, 0x7a, 0xa2, 0xa6, 0x7b, 0x24, 0xb1, 0x27, 0x8d, 0xca, 0x90, + 0x0b, 0xcb, 0xe7, 0xdc, 0xa6, 0xca, 0xaa, 0x27, 0xab, 0xda, 0xea, 0xaa, 0xa1, 0x70, 0x7d, 0xcd, + 0xe0, 0x04, 0xfa, 0x59, 0xa1, 0x0a, 0xab, 0x27, 0x6b, 0xba, 0xc7, 0x09, 0xf6, 0xb3, 0xe0, 0x0a, + 0xcd, 0xa7, 0x7c, 0x89, 0x2a, 0xad, 0x9e, 0xae, 0xae, 0xe9, 0x6b, 0xeb, 0xf7, 0x8c, 0x22, 0x53, + 0x9c, 0xcf, 0xd1, 0x69, 0x3f, 0x5c, 0x72, 0x3e, 0x69, 0x95, 0x6a, 0xae, 0x9e, 0xd6, 0xee, 0xac, + 0xdd, 0xd5, 0xee, 0x1a, 0x0a, 0xd7, 0x9e, 0xcf, 0x7a, 0x87, 0xb2, 0xb8, 0xf8, 0x7c, 0xd6, 0x1a, + 0x55, 0x5f, 0x5d, 0x79, 0x66, 0x0e, 0x06, 0xf6, 0x2d, 0xb5, 0xfc, 0xd2, 0x1e, 0x0f, 0xba, 0xd7, + 0xca, 0x60, 0x28, 0x5c, 0x8f, 0x62, 0xaf, 0x0b, 0xae, 0x20, 0x7d, 0xfa, 0xaf, 0xd1, 0x7b, 0x58, + 0xa1, 0x9e, 0x79, 0xd8, 0xef, 0x59, 0xf6, 0xc4, 0x34, 0x8a, 0x4c, 0x9a, 0xa1, 0x35, 0xd9, 0x0d, + 0xaf, 0xe3, 0xaf, 0x53, 0xda, 0x42, 0x3d, 0x71, 0xbb, 0xaa, 0xd1, 0x9e, 0x66, 0xad, 0xe3, 0x6e, + 0x78, 0x1d, 0x7f, 0x83, 0x72, 0x48, 0x3d, 0x71, 0xbb, 0xa6, 0x73, 0x8e, 0xb8, 0x8e, 0x77, 0xe0, + 0x42, 0x28, 0x2e, 0xb6, 0x47, 0x9d, 0x83, 0xe7, 0x66, 0xb7, 0xa4, 0xd1, 0xf0, 0xf8, 0x50, 0x56, + 0x24, 0xe3, 0x7c, 0x20, 0x44, 0xee, 0x60, 0x33, 0xb9, 0x07, 0xaf, 0x87, 0x03, 0xa5, 0xcb, 0xac, + 0xd2, 0x78, 0x89, 0xcc, 0xc5, 0x60, 0xcc, 0x0c, 0x51, 0x05, 0x07, 0xec, 0x52, 0x75, 0x1a, 0x40, + 0x7d, 0xaa, 0xef, 0x89, 0x39, 0xf5, 0x67, 0xe0, 0xe2, 0x74, 0x28, 0x75, 0xc9, 0xeb, 0x34, 0xa2, + 0x22, 0xf9, 0x42, 0x38, 0xaa, 0x4e, 0xd1, 0x67, 0xf4, 0x5d, 0xa3, 0x21, 0x56, 0xa4, 0x4f, 0xf5, + 0x7e, 0x1f, 0x4a, 0x53, 0xc1, 0xd6, 0x65, 0xdf, 0xa1, 0x31, 0x17, 0xd9, 0xaf, 0x85, 0xe2, 0x6e, + 0x98, 0x3c, 0xa3, 0xeb, 0xbb, 0x34, 0x08, 0x0b, 0xe4, 0xa9, 0x9e, 0x71, 0xc9, 0x82, 0xe1, 0xd8, + 0xe5, 0xde, 0xa3, 0x51, 0x99, 0x2f, 0x59, 0x20, 0x32, 0x8b, 0xfd, 0x86, 0xe2, 0xb3, 0xcb, 0xad, + 0xd3, 0x30, 0xcd, 0xfb, 0x0d, 0x86, 0x6a, 0x4e, 0x7e, 0x9b, 0x92, 0x77, 0x67, 0xcf, 0xf8, 0xc7, + 0x09, 0x1a, 0x60, 0x39, 0x7b, 0x77, 0xd6, 0x94, 0x3d, 0xf6, 0x8c, 0x29, 0xff, 0x84, 0xb2, 0x89, + 0xc0, 0x9e, 0x9a, 0xf3, 0x63, 0x98, 0x73, 0x6f, 0x75, 0xbd, 0xb1, 0x7d, 0x34, 0x2a, 0x35, 0x55, + 0xb9, 0x02, 0xda, 0x95, 0xa9, 0xec, 0xc7, 0xbd, 0xe4, 0x6d, 0x50, 0x94, 0x11, 0x24, 0x31, 0x2b, + 0xcc, 0x2e, 0xb3, 0xb2, 0xa3, 0x26, 0x22, 0xac, 0x30, 0x94, 0x67, 0x45, 0x20, 0x51, 0x2b, 0xae, + 0xd3, 0x67, 0x56, 0x3e, 0x50, 0xa5, 0x99, 0x56, 0xdc, 0x10, 0xc0, 0xad, 0x04, 0x48, 0x4b, 0xeb, + 0x7e, 0xbe, 0x85, 0xed, 0xe4, 0x8b, 0xe1, 0x04, 
0x6c, 0x03, 0xef, 0xcf, 0xc1, 0x4a, 0x46, 0x13, + 0x06, 0x37, 0x4d, 0xfb, 0xd9, 0x08, 0x5a, 0x60, 0x34, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff, + 0xa6, 0x04, 0x49, 0x9a, 0x4f, 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6d, 0x3e, 0x56, 0xce, 0xd1, 0x5f, + 0x0f, 0x5b, 0xad, 0xa7, 0x8a, 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xbd, 0xc6, 0xae, 0x22, 0x93, + 0x22, 0xe4, 0x9b, 0x9b, 0xdb, 0x1b, 0x0d, 0x63, 0xc7, 0xd8, 0xdc, 0xde, 0x53, 0x12, 0xb4, 0xad, + 0xf9, 0xb4, 0xf5, 0x60, 0x4f, 0x49, 0x92, 0x0c, 0x24, 0x68, 0x5d, 0x8a, 0x00, 0xa4, 0x77, 0xf7, + 0x8c, 0xcd, 0xed, 0x0d, 0x25, 0x4d, 0xad, 0xec, 0x6d, 0x6e, 0x35, 0x94, 0x0c, 0x45, 0xee, 0xbd, + 0xbb, 0xf3, 0xb4, 0xa1, 0x64, 0xe9, 0xcf, 0x07, 0x86, 0xf1, 0xe0, 0x2b, 0x4a, 0x8e, 0x92, 0xb6, + 0x1e, 0xec, 0x28, 0x80, 0xcd, 0x0f, 0x1e, 0x3e, 0x6d, 0x28, 0x79, 0x52, 0x80, 0x6c, 0xf3, 0xdd, + 0xed, 0x47, 0x7b, 0x9b, 0xad, 0x6d, 0xa5, 0x50, 0x3e, 0x81, 0x12, 0x5b, 0xe6, 0xc0, 0x2a, 0xb2, + 0xa4, 0xf0, 0x1d, 0x48, 0xb1, 0x9d, 0x91, 0x50, 0x25, 0x95, 0xf0, 0xce, 0x4c, 0x53, 0x56, 0xd8, + 0x1e, 0x31, 0xda, 0xd2, 0x65, 0x48, 0xb1, 0x55, 0x5a, 0x84, 0x14, 0x5b, 0x1d, 0x19, 0x53, 0x45, + 0x56, 0x28, 0xff, 0x96, 0x0c, 0xb0, 0x61, 0xef, 0x3e, 0xef, 0x8f, 0x30, 0x21, 0xbf, 0x0c, 0x30, + 0x79, 0xde, 0x1f, 0xb5, 0x51, 0xf5, 0x3c, 0xa9, 0xcc, 0xd1, 0x1a, 0xf4, 0x77, 0xe4, 0x1a, 0x14, + 0xb0, 0xf9, 0x90, 0x79, 0x21, 0xcc, 0x25, 0x33, 0x46, 0x9e, 0xd6, 0x71, 0xc7, 0x14, 0x84, 0xd4, + 0x74, 0x4c, 0x21, 0xd3, 0x02, 0xa4, 0xa6, 0x93, 0xab, 0x80, 0xc5, 0xf6, 0x04, 0x23, 0x0a, 0xa6, + 0x8d, 0x39, 0x03, 0xfb, 0x65, 0x31, 0x86, 0xbc, 0x0d, 0xd8, 0x27, 0x9b, 0x77, 0x71, 0xfa, 0x74, + 0xb8, 0xc3, 0x5d, 0xa1, 0x3f, 0xd8, 0x6c, 0x7d, 0xc2, 0x52, 0x0b, 0x72, 0x5e, 0x3d, 0xed, 0x0b, + 0x6b, 0xf9, 0x8c, 0x14, 0x9c, 0x11, 0x60, 0x95, 0x37, 0x25, 0x06, 0xe0, 0xa3, 0x59, 0xc0, 0xd1, + 0x30, 0x12, 0x1b, 0x4e, 0xf9, 0x32, 0xcc, 0x6d, 0xdb, 0x16, 0x3b, 0xbd, 0xb8, 0x4a, 0x05, 0x90, + 0x3a, 0x25, 0x09, 0xb3, 0x27, 0xa9, 0x53, 0xbe, 0x02, 0x20, 0xb4, 0x29, 0x20, 0xed, 0xb3, 0x36, + 0xf4, 0x01, 0xd2, 0x7e, 0xf9, 0x26, 0xa4, 0xb7, 0x3a, 0xc7, 0x7b, 0x9d, 0x1e, 0xb9, 0x06, 0x30, + 0xe8, 0x4c, 0x9c, 0xf6, 0x21, 0xee, 0xc3, 0xe7, 0x9f, 0x7f, 0xfe, 0xb9, 0x84, 0x97, 0xbd, 0x1c, + 0xad, 0x65, 0xfb, 0xf1, 0x02, 0xa0, 0x35, 0xe8, 0x6e, 0x99, 0x93, 0x49, 0xa7, 0x67, 0x92, 0x2a, + 0xa4, 0x2d, 0x73, 0x42, 0xa3, 0x9d, 0x84, 0xef, 0x08, 0xcb, 0xfe, 0x2a, 0xf8, 0xa8, 0x95, 0x6d, + 0x84, 0x18, 0x1c, 0x4a, 0x14, 0x48, 0x58, 0x47, 0x43, 0x7c, 0x27, 0x49, 0x19, 0xf4, 0xe7, 0xd2, + 0x25, 0x48, 0x33, 0x0c, 0x21, 0x90, 0xb4, 0x3a, 0x43, 0xb3, 0xc4, 0xfa, 0xc5, 0xdf, 0xe5, 0x5f, + 0x95, 0x00, 0xb6, 0xcd, 0x97, 0x67, 0xe8, 0xd3, 0x47, 0xc5, 0xf4, 0x99, 0x60, 0x7d, 0xde, 0x8f, + 0xeb, 0x93, 0xea, 0xec, 0xd0, 0xb6, 0xbb, 0x6d, 0xb6, 0xc5, 0xec, 0x49, 0x27, 0x47, 0x6b, 0x70, + 0xd7, 0xca, 0x1f, 0x40, 0x61, 0xd3, 0xb2, 0xcc, 0xb1, 0x3b, 0x26, 0x02, 0xc9, 0x67, 0xf6, 0xc4, + 0xe1, 0x6f, 0x4b, 0xf8, 0x9b, 0x94, 0x20, 0x39, 0xb2, 0xc7, 0x0e, 0x9b, 0x67, 0x3d, 0xa9, 0xaf, + 0xae, 0xae, 0x1a, 0x58, 0x43, 0x2e, 0x41, 0xee, 0xc0, 0xb6, 0x2c, 0xf3, 0x80, 0x4e, 0x22, 0x81, + 0x69, 0x8d, 0x5f, 0x51, 0xfe, 0x65, 0x09, 0x0a, 0x2d, 0xe7, 0x99, 0x6f, 0x5c, 0x81, 0xc4, 0x73, + 0xf3, 0x04, 0x87, 0x97, 0x30, 0xe8, 0x4f, 0x7a, 0x54, 0x7e, 0xbe, 0x33, 0x38, 0x62, 0x6f, 0x4d, + 0x05, 0x83, 0x15, 0xc8, 0x05, 0x48, 0xbf, 0x34, 0xfb, 0xbd, 0x67, 0x0e, 0xda, 0x94, 0x0d, 0x5e, + 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0x83, 0x2d, 0x25, 0x71, 0xbd, 0x2e, 0xf8, 0xeb, 0x25, 0xce, 0xc1, + 0x60, 0xa0, 0x1b, 0xd9, 0x6c, 0x57, 0xf9, 0xe8, 0xa3, 0x8f, 0x3e, 0x92, 
0xcb, 0x87, 0xb0, 0xe8, + 0x1e, 0xde, 0xc0, 0x64, 0xb7, 0xa1, 0x34, 0x30, 0xed, 0xf6, 0x61, 0xdf, 0xea, 0x0c, 0x06, 0x27, + 0xed, 0x97, 0xb6, 0xd5, 0xee, 0x58, 0x6d, 0x7b, 0x72, 0xd0, 0x19, 0xe3, 0x02, 0x44, 0x77, 0xb1, + 0x38, 0x30, 0xed, 0x26, 0xa3, 0xbd, 0x6f, 0x5b, 0x0f, 0xac, 0x16, 0xe5, 0x94, 0xff, 0x20, 0x09, + 0xb9, 0xad, 0x13, 0xd7, 0xfa, 0x22, 0xa4, 0x0e, 0xec, 0x23, 0x8b, 0xad, 0x65, 0xca, 0x60, 0x05, + 0x6f, 0x8f, 0x64, 0x61, 0x8f, 0x16, 0x21, 0xf5, 0xe2, 0xc8, 0x76, 0x4c, 0x9c, 0x6e, 0xce, 0x60, + 0x05, 0xba, 0x5a, 0x23, 0xd3, 0x29, 0x25, 0x31, 0xb9, 0xa5, 0x3f, 0xfd, 0xf9, 0xa7, 0xce, 0x30, + 0x7f, 0xb2, 0x02, 0x69, 0x9b, 0xae, 0xfe, 0xa4, 0x94, 0xc6, 0x77, 0x35, 0x01, 0x2e, 0xee, 0x8a, + 0xc1, 0x51, 0x64, 0x13, 0x16, 0x5e, 0x9a, 0xed, 0xe1, 0xd1, 0xc4, 0x69, 0xf7, 0xec, 0x76, 0xd7, + 0x34, 0x47, 0xe6, 0xb8, 0x34, 0x87, 0x3d, 0x09, 0x3e, 0x61, 0xd6, 0x42, 0x1a, 0xf3, 0x2f, 0xcd, + 0xad, 0xa3, 0x89, 0xb3, 0x61, 0x3f, 0x46, 0x16, 0xa9, 0x42, 0x6e, 0x6c, 0x52, 0x4f, 0x40, 0x07, + 0x5b, 0x08, 0xf7, 0x1e, 0xa0, 0x66, 0xc7, 0xe6, 0x08, 0x2b, 0xc8, 0x3a, 0x64, 0xf7, 0xfb, 0xcf, + 0xcd, 0xc9, 0x33, 0xb3, 0x5b, 0xca, 0xa8, 0x52, 0x65, 0x5e, 0xbb, 0xe8, 0x73, 0xbc, 0x65, 0x5d, + 0x79, 0x64, 0x0f, 0xec, 0xb1, 0xe1, 0x41, 0xc9, 0x7d, 0xc8, 0x4d, 0xec, 0xa1, 0xc9, 0xf4, 0x9d, + 0xc5, 0xa0, 0x7a, 0x79, 0x16, 0x6f, 0xd7, 0x1e, 0x9a, 0xae, 0x07, 0x73, 0xf1, 0x64, 0x99, 0x0d, + 0x74, 0x9f, 0x5e, 0x9d, 0x4b, 0x80, 0x4f, 0x03, 0x74, 0x40, 0x78, 0x95, 0x26, 0x4b, 0x74, 0x40, + 0xbd, 0x43, 0x7a, 0x23, 0x2a, 0xe5, 0x31, 0xaf, 0xf4, 0xca, 0x4b, 0xb7, 0x20, 0xe7, 0x19, 0xf4, + 0x5d, 0x1f, 0x73, 0x37, 0x39, 0xf4, 0x07, 0xcc, 0xf5, 0x31, 0x5f, 0xf3, 0x06, 0xa4, 0x70, 0xd8, + 0x34, 0x42, 0x19, 0x0d, 0x1a, 0x10, 0x73, 0x90, 0xda, 0x30, 0x1a, 0x8d, 0x6d, 0x45, 0xc2, 0xd8, + 0xf8, 0xf4, 0xdd, 0x86, 0x22, 0x0b, 0x8a, 0xfd, 0x6d, 0x09, 0x12, 0x8d, 0x63, 0x54, 0x0b, 0x9d, + 0x86, 0x7b, 0xa2, 0xe9, 0x6f, 0xad, 0x06, 0xc9, 0xa1, 0x3d, 0x36, 0xc9, 0xf9, 0x19, 0xb3, 0x2c, + 0xf5, 0x70, 0xbf, 0x84, 0x57, 0xe4, 0xc6, 0xb1, 0x63, 0x20, 0x5e, 0x7b, 0x0b, 0x92, 0x8e, 0x79, + 0xec, 0xcc, 0xe6, 0x3d, 0x63, 0x1d, 0x50, 0x80, 0x76, 0x13, 0xd2, 0xd6, 0xd1, 0x70, 0xdf, 0x1c, + 0xcf, 0x86, 0xf6, 0x71, 0x7a, 0x1c, 0x52, 0x7e, 0x0f, 0x94, 0x47, 0xf6, 0x70, 0x34, 0x30, 0x8f, + 0x1b, 0xc7, 0x8e, 0x69, 0x4d, 0xfa, 0xb6, 0x45, 0xf5, 0x7c, 0xd8, 0x1f, 0xa3, 0x17, 0xc1, 0xb7, + 0x62, 0x2c, 0xd0, 0x53, 0x3d, 0x31, 0x0f, 0x6c, 0xab, 0xcb, 0x1d, 0x26, 0x2f, 0x51, 0xb4, 0xf3, + 0xac, 0x3f, 0xa6, 0x0e, 0x84, 0xfa, 0x79, 0x56, 0x28, 0x6f, 0x40, 0x91, 0xe7, 0x18, 0x13, 0xde, + 0x71, 0xf9, 0x06, 0x14, 0xdc, 0x2a, 0x7c, 0x38, 0xcf, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x52, 0xce, + 0xd1, 0x65, 0x6d, 0x6d, 0x37, 0x14, 0x89, 0xfe, 0xd8, 0x7b, 0xbf, 0x15, 0x58, 0xca, 0x4b, 0x50, + 0xf0, 0xc6, 0xbe, 0x6b, 0x3a, 0xd8, 0x42, 0x03, 0x42, 0xa6, 0x2e, 0x67, 0xa5, 0x72, 0x06, 0x52, + 0x8d, 0xe1, 0xc8, 0x39, 0x29, 0xff, 0x22, 0xe4, 0x39, 0xe8, 0x69, 0x7f, 0xe2, 0x90, 0x3b, 0x90, + 0x19, 0xf2, 0xf9, 0x4a, 0x78, 0xdd, 0x13, 0x35, 0xe5, 0xe3, 0xdc, 0xdf, 0x86, 0x8b, 0x5e, 0xaa, + 0x42, 0x46, 0xf0, 0xa5, 0xfc, 0xa8, 0xcb, 0xe2, 0x51, 0x67, 0x4e, 0x21, 0x21, 0x38, 0x85, 0xf2, + 0x16, 0x64, 0x58, 0x04, 0x9c, 0x60, 0x54, 0x67, 0xa9, 0x22, 0x13, 0x13, 0xdb, 0xf9, 0x3c, 0xab, + 0x63, 0x17, 0x95, 0xab, 0x90, 0x47, 0xc1, 0x72, 0x04, 0x73, 0x9d, 0x80, 0x55, 0x4c, 0x6e, 0xbf, + 0x9f, 0x82, 0xac, 0xbb, 0x52, 0x64, 0x19, 0xd2, 0x2c, 0x3f, 0x43, 0x53, 0xee, 0xfb, 0x41, 0x0a, + 0x33, 0x32, 0xb2, 0x0c, 0x19, 0x9e, 0x83, 0x71, 0xef, 0x2e, 0x57, 0x35, 0x23, 0xcd, 0x72, 0x2e, + 
0xaf, 0xb1, 0xa6, 0xa3, 0x63, 0x62, 0x2f, 0x03, 0x69, 0x96, 0x55, 0x11, 0x15, 0x72, 0x5e, 0x1e, + 0x85, 0xfe, 0x98, 0x3f, 0x03, 0x64, 0xdd, 0xc4, 0x49, 0x40, 0xd4, 0x74, 0xf4, 0x58, 0x3c, 0xe7, + 0xcf, 0x36, 0xfd, 0xeb, 0x49, 0xd6, 0xcd, 0x86, 0xf0, 0xf9, 0xde, 0x4d, 0xf0, 0x33, 0x3c, 0xff, + 0xf1, 0x01, 0x35, 0x1d, 0x5d, 0x82, 0x9b, 0xcd, 0x67, 0x78, 0x8e, 0x43, 0xae, 0xd2, 0x21, 0x62, + 0xce, 0x82, 0x47, 0xdf, 0x4f, 0xdd, 0xd3, 0x2c, 0x93, 0x21, 0xd7, 0xa8, 0x05, 0x96, 0x98, 0xe0, + 0xb9, 0xf4, 0xf3, 0xf4, 0x0c, 0xcf, 0x57, 0xc8, 0x4d, 0x0a, 0x61, 0xcb, 0x5f, 0x82, 0x88, 0xa4, + 0x3c, 0xc3, 0x93, 0x72, 0xa2, 0xd2, 0x0e, 0xd1, 0x3d, 0xa0, 0x4b, 0x10, 0x12, 0xf0, 0x34, 0x4b, + 0xc0, 0xc9, 0x15, 0x34, 0xc7, 0x26, 0x55, 0xf0, 0x93, 0xed, 0x0c, 0x4f, 0x70, 0xfc, 0x76, 0xbc, + 0xb2, 0x79, 0x89, 0x75, 0x86, 0xa7, 0x30, 0xa4, 0x46, 0xf7, 0x8b, 0xea, 0xbb, 0x34, 0x8f, 0x4e, + 0xb0, 0xe4, 0x0b, 0xcf, 0xdd, 0x53, 0xe6, 0x03, 0xeb, 0xcc, 0x83, 0x18, 0xa9, 0x26, 0x9e, 0x86, + 0x25, 0xca, 0xdb, 0xe9, 0x5b, 0x87, 0xa5, 0x22, 0xae, 0x44, 0xa2, 0x6f, 0x1d, 0x1a, 0xa9, 0x26, + 0xad, 0x61, 0x1a, 0xd8, 0xa6, 0x6d, 0x0a, 0xb6, 0x25, 0x6f, 0xb3, 0x46, 0x5a, 0x45, 0x4a, 0x90, + 0x6a, 0xb6, 0xb7, 0x3b, 0x56, 0x69, 0x81, 0xf1, 0xac, 0x8e, 0x65, 0x24, 0x9b, 0xdb, 0x1d, 0x8b, + 0xbc, 0x05, 0x89, 0xc9, 0xd1, 0x7e, 0x89, 0x84, 0xbf, 0xac, 0xec, 0x1e, 0xed, 0xbb, 0x43, 0x31, + 0x28, 0x82, 0x2c, 0x43, 0x76, 0xe2, 0x8c, 0xdb, 0xbf, 0x60, 0x8e, 0xed, 0xd2, 0x79, 0x5c, 0xc2, + 0x73, 0x46, 0x66, 0xe2, 0x8c, 0x3f, 0x30, 0xc7, 0xf6, 0x19, 0x9d, 0x5f, 0xf9, 0x0a, 0xe4, 0x05, + 0xbb, 0xa4, 0x08, 0x92, 0xc5, 0x6e, 0x0a, 0x75, 0xe9, 0x8e, 0x21, 0x59, 0xe5, 0x3d, 0x28, 0xb8, + 0x39, 0x0c, 0xce, 0x57, 0xa3, 0x27, 0x69, 0x60, 0x8f, 0xf1, 0x7c, 0xce, 0x6b, 0x97, 0xc4, 0x10, + 0xe5, 0xc3, 0x78, 0xb8, 0x60, 0xd0, 0xb2, 0x12, 0x1a, 0x8a, 0x54, 0xfe, 0xa1, 0x04, 0x85, 0x2d, + 0x7b, 0xec, 0x3f, 0x30, 0x2f, 0x42, 0x6a, 0xdf, 0xb6, 0x07, 0x13, 0x34, 0x9b, 0x35, 0x58, 0x81, + 0xbc, 0x01, 0x05, 0xfc, 0xe1, 0xe6, 0x9e, 0xb2, 0xf7, 0xb4, 0x91, 0xc7, 0x7a, 0x9e, 0x70, 0x12, + 0x48, 0xf6, 0x2d, 0x67, 0xc2, 0x3d, 0x19, 0xfe, 0x26, 0x5f, 0x80, 0x3c, 0xfd, 0xeb, 0x32, 0x93, + 0xde, 0x85, 0x15, 0x68, 0x35, 0x27, 0xbe, 0x05, 0x73, 0xb8, 0xfb, 0x1e, 0x2c, 0xe3, 0x3d, 0x63, + 0x14, 0x58, 0x03, 0x07, 0x96, 0x20, 0xc3, 0x5c, 0xc1, 0x04, 0xbf, 0x96, 0xe5, 0x0c, 0xb7, 0x48, + 0xdd, 0x2b, 0x66, 0x02, 0x2c, 0xdc, 0x67, 0x0c, 0x5e, 0x2a, 0x3f, 0x80, 0x2c, 0x46, 0xa9, 0xd6, + 0xa0, 0x4b, 0xca, 0x20, 0xf5, 0x4a, 0x26, 0xc6, 0xc8, 0x45, 0xe1, 0x9a, 0xcf, 0x9b, 0x57, 0x36, + 0x0c, 0xa9, 0xb7, 0xb4, 0x00, 0xd2, 0x06, 0xbd, 0x77, 0x1f, 0x73, 0x37, 0x2d, 0x1d, 0x97, 0x5b, + 0xdc, 0xc4, 0xb6, 0xf9, 0x32, 0xce, 0xc4, 0xb6, 0xf9, 0x92, 0x99, 0xb8, 0x3a, 0x65, 0x82, 0x96, + 0x4e, 0xf8, 0xa7, 0x43, 0xe9, 0xa4, 0x5c, 0x85, 0x39, 0x3c, 0x9e, 0x7d, 0xab, 0xb7, 0x63, 0xf7, + 0x2d, 0xbc, 0xe7, 0x1f, 0xe2, 0x3d, 0x49, 0x32, 0xa4, 0x43, 0xba, 0x07, 0xe6, 0x71, 0xe7, 0x80, + 0xdd, 0x38, 0xb3, 0x06, 0x2b, 0x94, 0x3f, 0x4b, 0xc2, 0x3c, 0x77, 0xad, 0xef, 0xf7, 0x9d, 0x67, + 0x5b, 0x9d, 0x11, 0x79, 0x0a, 0x05, 0xea, 0x55, 0xdb, 0xc3, 0xce, 0x68, 0x44, 0x8f, 0xaf, 0x84, + 0x57, 0x8d, 0xeb, 0x53, 0xae, 0x9a, 0xe3, 0x57, 0xb6, 0x3b, 0x43, 0x73, 0x8b, 0x61, 0x1b, 0x96, + 0x33, 0x3e, 0x31, 0xf2, 0x96, 0x5f, 0x43, 0x36, 0x21, 0x3f, 0x9c, 0xf4, 0x3c, 0x63, 0x32, 0x1a, + 0xab, 0x44, 0x1a, 0xdb, 0x9a, 0xf4, 0x02, 0xb6, 0x60, 0xe8, 0x55, 0xd0, 0x81, 0x51, 0x7f, 0xec, + 0xd9, 0x4a, 0x9c, 0x32, 0x30, 0xea, 0x3a, 0x82, 0x03, 0xdb, 0xf7, 0x6b, 0xc8, 0x63, 0x00, 0x7a, + 0xbc, 0x1c, 0x9b, 0xa6, 
0x4e, 0xa8, 0xa0, 0xbc, 0xf6, 0x66, 0xa4, 0xad, 0x5d, 0x67, 0xbc, 0x67, + 0xef, 0x3a, 0x63, 0x66, 0x88, 0x1e, 0x4c, 0x2c, 0x2e, 0xbd, 0x03, 0x4a, 0x78, 0xfe, 0xe2, 0x8d, + 0x3c, 0x35, 0xe3, 0x46, 0x9e, 0xe3, 0x37, 0xf2, 0xba, 0x7c, 0x57, 0x5a, 0x7a, 0x0f, 0x8a, 0xa1, + 0x29, 0x8b, 0x74, 0xc2, 0xe8, 0xb7, 0x45, 0x7a, 0x5e, 0x7b, 0x5d, 0xf8, 0x9c, 0x2d, 0x6e, 0xb8, + 0x68, 0xf7, 0x1d, 0x50, 0xc2, 0xd3, 0x17, 0x0d, 0x67, 0x63, 0x32, 0x05, 0xe4, 0xdf, 0x87, 0xb9, + 0xc0, 0x94, 0x45, 0x72, 0xee, 0x94, 0x49, 0x95, 0x7f, 0x29, 0x05, 0xa9, 0x96, 0x65, 0xda, 0x87, + 0xe4, 0xf5, 0x60, 0x9c, 0x7c, 0x72, 0xce, 0x8d, 0x91, 0x17, 0x43, 0x31, 0xf2, 0xc9, 0x39, 0x2f, + 0x42, 0x5e, 0x0c, 0x45, 0x48, 0xb7, 0xa9, 0xa6, 0x93, 0xcb, 0x53, 0xf1, 0xf1, 0xc9, 0x39, 0x21, + 0x38, 0x5e, 0x9e, 0x0a, 0x8e, 0x7e, 0x73, 0x4d, 0xa7, 0x0e, 0x35, 0x18, 0x19, 0x9f, 0x9c, 0xf3, + 0xa3, 0xe2, 0x72, 0x38, 0x2a, 0x7a, 0x8d, 0x35, 0x9d, 0x0d, 0x49, 0x88, 0x88, 0x38, 0x24, 0x16, + 0x0b, 0x97, 0xc3, 0xb1, 0x10, 0x79, 0x3c, 0x0a, 0x2e, 0x87, 0xa3, 0x20, 0x36, 0xf2, 0xa8, 0x77, + 0x31, 0x14, 0xf5, 0xd0, 0x28, 0x0b, 0x77, 0xcb, 0xe1, 0x70, 0xc7, 0x78, 0xc2, 0x48, 0xc5, 0x58, + 0xe7, 0x35, 0xd6, 0x74, 0xa2, 0x85, 0x02, 0x5d, 0xf4, 0x6d, 0x1f, 0xf7, 0x02, 0x9d, 0xbe, 0x4e, + 0x97, 0xcd, 0xbd, 0x88, 0x16, 0x63, 0xbe, 0xf8, 0xe3, 0x6a, 0xba, 0x17, 0x31, 0x0d, 0x32, 0x87, + 0x3c, 0x01, 0x56, 0xd0, 0x73, 0x09, 0xb2, 0xc4, 0xcd, 0x5f, 0x69, 0xb6, 0xd1, 0x83, 0xd1, 0x79, + 0x1d, 0xb2, 0x3b, 0x7d, 0x05, 0xe6, 0x9a, 0xed, 0xa7, 0x9d, 0x71, 0xcf, 0x9c, 0x38, 0xed, 0xbd, + 0x4e, 0xcf, 0x7b, 0x44, 0xa0, 0xfb, 0x9f, 0x6f, 0xf2, 0x96, 0xbd, 0x4e, 0x8f, 0x5c, 0x70, 0xc5, + 0xd5, 0xc5, 0x56, 0x89, 0xcb, 0x6b, 0xe9, 0x75, 0xba, 0x68, 0xcc, 0x18, 0xfa, 0xc2, 0x05, 0xee, + 0x0b, 0x1f, 0x66, 0x20, 0x75, 0x64, 0xf5, 0x6d, 0xeb, 0x61, 0x0e, 0x32, 0x8e, 0x3d, 0x1e, 0x76, + 0x1c, 0xbb, 0xfc, 0x23, 0x09, 0xe0, 0x91, 0x3d, 0x1c, 0x1e, 0x59, 0xfd, 0x17, 0x47, 0x26, 0xb9, + 0x02, 0xf9, 0x61, 0xe7, 0xb9, 0xd9, 0x1e, 0x9a, 0xed, 0x83, 0xb1, 0x7b, 0x0e, 0x72, 0xb4, 0x6a, + 0xcb, 0x7c, 0x34, 0x3e, 0x21, 0x25, 0xf7, 0x8a, 0x8e, 0xda, 0x41, 0x49, 0xf2, 0x2b, 0xfb, 0x22, + 0xbf, 0x74, 0xa6, 0xf9, 0x1e, 0xba, 0xd7, 0x4e, 0x96, 0x47, 0x64, 0xf8, 0xee, 0x61, 0x89, 0x4a, + 0xde, 0x31, 0x87, 0xa3, 0xf6, 0x01, 0x4a, 0x85, 0xca, 0x21, 0x45, 0xcb, 0x8f, 0xc8, 0x6d, 0x48, + 0x1c, 0xd8, 0x03, 0x14, 0xc9, 0x29, 0xfb, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, 0x27, 0x4c, 0x36, + 0x79, 0x6d, 0x41, 0xb8, 0x27, 0xb0, 0xd0, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, 0xf7, 0x8d, 0x22, + 0x24, 0x9a, 0xad, 0x16, 0x8d, 0xfd, 0xcd, 0x56, 0x6b, 0x4d, 0x91, 0xea, 0x5f, 0x82, 0x6c, 0x6f, + 0x6c, 0x9a, 0xd4, 0x3d, 0xcc, 0xce, 0x39, 0x3e, 0xc4, 0x58, 0xe7, 0x81, 0xea, 0x5b, 0x90, 0x39, + 0x60, 0x59, 0x07, 0x89, 0x48, 0x6b, 0x4b, 0x7f, 0xc8, 0x1e, 0x55, 0x96, 0xfc, 0xe6, 0x70, 0x9e, + 0x62, 0xb8, 0x36, 0xea, 0x3b, 0x90, 0x1b, 0xb7, 0x4f, 0x33, 0xf8, 0x31, 0x8b, 0x2e, 0x71, 0x06, + 0xb3, 0x63, 0x5e, 0x55, 0x6f, 0xc0, 0x82, 0x65, 0xbb, 0xdf, 0x50, 0xda, 0x5d, 0x76, 0xc6, 0x2e, + 0x4e, 0x5f, 0xe5, 0x5c, 0xe3, 0x26, 0xfb, 0x6e, 0x69, 0xd9, 0xbc, 0x81, 0x9d, 0xca, 0xfa, 0x23, + 0x50, 0x04, 0x33, 0x98, 0x7a, 0xc6, 0x59, 0x39, 0x64, 0x1f, 0x4a, 0x3d, 0x2b, 0x78, 0xee, 0x43, + 0x46, 0xd8, 0xc9, 0x8c, 0x31, 0xd2, 0x63, 0x5f, 0x9d, 0x3d, 0x23, 0xe8, 0xea, 0xa6, 0x8d, 0x50, + 0x5f, 0x13, 0x6d, 0xe4, 0x19, 0xfb, 0x20, 0x2d, 0x1a, 0xa9, 0xe9, 0xa1, 0x55, 0x39, 0x3a, 0x75, + 0x28, 0x7d, 0xf6, 0x3d, 0xd9, 0xb3, 0xc2, 0x1c, 0xe0, 0x0c, 0x33, 0xf1, 0x83, 0xf9, 0x90, 0x7d, + 0x6a, 0x0e, 0x98, 0x99, 0x1a, 0xcd, 0xe4, 0xd4, 
0xd1, 0x3c, 0x67, 0xdf, 0x75, 0x3d, 0x33, 0xbb, + 0xb3, 0x46, 0x33, 0x39, 0x75, 0x34, 0x03, 0xf6, 0xc5, 0x37, 0x60, 0xa6, 0xa6, 0xd7, 0x37, 0x80, + 0x88, 0x5b, 0xcd, 0xe3, 0x44, 0x8c, 0x9d, 0x21, 0xfb, 0x8e, 0xef, 0x6f, 0x36, 0xa3, 0xcc, 0x32, + 0x14, 0x3f, 0x20, 0x8b, 0x7d, 0xe2, 0x0f, 0x1a, 0xaa, 0xe9, 0xf5, 0x4d, 0x38, 0x2f, 0x4e, 0xec, + 0x0c, 0x43, 0xb2, 0x55, 0xa9, 0x52, 0x34, 0x16, 0xfc, 0xa9, 0x71, 0xce, 0x4c, 0x53, 0xf1, 0x83, + 0x1a, 0xa9, 0x52, 0x45, 0x99, 0x32, 0x55, 0xd3, 0xeb, 0x0f, 0xa0, 0x28, 0x98, 0xda, 0xc7, 0x08, + 0x1d, 0x6d, 0xe6, 0x05, 0xfb, 0x5f, 0x0b, 0xcf, 0x0c, 0x8d, 0xe8, 0xe1, 0x1d, 0xe3, 0x31, 0x2e, + 0xda, 0xc8, 0x98, 0xfd, 0xa3, 0x80, 0x3f, 0x16, 0x64, 0x84, 0x8e, 0x04, 0xe6, 0xdf, 0x71, 0x56, + 0x26, 0xec, 0x5f, 0x08, 0xfc, 0xa1, 0x50, 0x42, 0xbd, 0x1f, 0x98, 0x8e, 0x49, 0x83, 0x5c, 0x8c, + 0x0d, 0x07, 0x3d, 0xf2, 0x9b, 0x91, 0x80, 0x15, 0xf1, 0x81, 0x44, 0x98, 0x36, 0x2d, 0xd6, 0x37, + 0x61, 0xfe, 0xec, 0x0e, 0xe9, 0x63, 0x89, 0x65, 0xcb, 0xd5, 0x15, 0x9a, 0x50, 0x1b, 0x73, 0xdd, + 0x80, 0x5f, 0x6a, 0xc0, 0xdc, 0x99, 0x9d, 0xd2, 0x27, 0x12, 0xcb, 0x39, 0xa9, 0x25, 0xa3, 0xd0, + 0x0d, 0x7a, 0xa6, 0xb9, 0x33, 0xbb, 0xa5, 0x4f, 0x25, 0xf6, 0x40, 0xa1, 0x6b, 0x9e, 0x11, 0xd7, + 0x33, 0xcd, 0x9d, 0xd9, 0x2d, 0x7d, 0x95, 0x65, 0x94, 0xb2, 0x5e, 0x15, 0x8d, 0xa0, 0x2f, 0x98, + 0x3f, 0xbb, 0x5b, 0xfa, 0x9a, 0x84, 0x8f, 0x15, 0xb2, 0xae, 0x7b, 0xeb, 0xe2, 0x79, 0xa6, 0xf9, + 0xb3, 0xbb, 0xa5, 0xaf, 0x4b, 0xf8, 0xa4, 0x21, 0xeb, 0xeb, 0x01, 0x33, 0xc1, 0xd1, 0x9c, 0xee, + 0x96, 0xbe, 0x21, 0xe1, 0x2b, 0x83, 0xac, 0xd7, 0x3c, 0x33, 0xbb, 0x53, 0xa3, 0x39, 0xdd, 0x2d, + 0x7d, 0x13, 0x6f, 0xf1, 0x75, 0x59, 0xbf, 0x13, 0x30, 0x83, 0x9e, 0xa9, 0xf8, 0x0a, 0x6e, 0xe9, + 0x5b, 0x12, 0x3e, 0x06, 0xc9, 0xfa, 0x5d, 0xc3, 0xed, 0xdd, 0xf7, 0x4c, 0xc5, 0x57, 0x70, 0x4b, + 0x9f, 0x49, 0xf8, 0x66, 0x24, 0xeb, 0xf7, 0x82, 0x86, 0xd0, 0x33, 0x29, 0xaf, 0xe2, 0x96, 0xbe, + 0x4d, 0x2d, 0x15, 0xeb, 0xf2, 0xfa, 0xaa, 0xe1, 0x0e, 0x40, 0xf0, 0x4c, 0xca, 0xab, 0xb8, 0xa5, + 0xef, 0x50, 0x53, 0x4a, 0x5d, 0x5e, 0x5f, 0x0b, 0x99, 0xaa, 0xe9, 0xf5, 0x47, 0x50, 0x38, 0xab, + 0x5b, 0xfa, 0xae, 0xf8, 0x16, 0x97, 0xef, 0x0a, 0xbe, 0x69, 0x47, 0xd8, 0xb3, 0x53, 0x1d, 0xd3, + 0xf7, 0x30, 0xc7, 0xa9, 0xcf, 0x3d, 0x61, 0xef, 0x55, 0x8c, 0xe0, 0x6f, 0x1f, 0x73, 0x53, 0x5b, + 0xfe, 0xf9, 0x38, 0xd5, 0x47, 0x7d, 0x5f, 0xc2, 0x47, 0xad, 0x02, 0x37, 0x88, 0x78, 0xef, 0xa4, + 0x30, 0x87, 0xf5, 0xa1, 0x3f, 0xcb, 0xd3, 0xbc, 0xd5, 0x0f, 0xa4, 0x57, 0x71, 0x57, 0xf5, 0x44, + 0x6b, 0xbb, 0xe1, 0x2d, 0x06, 0xd6, 0xbc, 0x0d, 0xc9, 0x63, 0x6d, 0x75, 0x4d, 0xbc, 0x92, 0x89, + 0x6f, 0xb9, 0xcc, 0x49, 0xe5, 0xb5, 0xa2, 0xf0, 0xdc, 0x3d, 0x1c, 0x39, 0x27, 0x06, 0xb2, 0x38, + 0x5b, 0x8b, 0x64, 0x7f, 0x12, 0xc3, 0xd6, 0x38, 0xbb, 0x1a, 0xc9, 0xfe, 0x34, 0x86, 0x5d, 0xe5, + 0x6c, 0x3d, 0x92, 0xfd, 0xd5, 0x18, 0xb6, 0xce, 0xd9, 0xeb, 0x91, 0xec, 0xaf, 0xc5, 0xb0, 0xd7, + 0x39, 0xbb, 0x16, 0xc9, 0xfe, 0x7a, 0x0c, 0xbb, 0xc6, 0xd9, 0x77, 0x22, 0xd9, 0xdf, 0x88, 0x61, + 0xdf, 0xe1, 0xec, 0xbb, 0x91, 0xec, 0x6f, 0xc6, 0xb0, 0xef, 0x72, 0xf6, 0xbd, 0x48, 0xf6, 0xb7, + 0x62, 0xd8, 0xf7, 0x18, 0x7b, 0x6d, 0x35, 0x92, 0xfd, 0x59, 0x34, 0x7b, 0x6d, 0x95, 0xb3, 0xa3, + 0xb5, 0xf6, 0xed, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x13, 0xc3, 0xe6, 0x5a, 0x5b, + 0x8b, 0xd6, 0xda, 0x77, 0x63, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x5e, 0x0c, 0x9b, 0x6b, + 0x6d, 0x2d, 0x5a, 0x6b, 0xdf, 0x8f, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x07, 0x31, 0x6c, + 0xae, 0xb5, 0xb5, 0x68, 0xad, 0xfd, 0x51, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 
0x5a, 0x6b, 0x7f, 0x1c, + 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0x9f, 0xc4, 0xb0, 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd, + 0x69, 0x34, 0x5b, 0xe3, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x67, 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a, + 0x6b, 0x7f, 0x1e, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0xc3, 0x18, 0x36, 0xd7, 0x9a, 0x16, + 0xad, 0xb5, 0xbf, 0x88, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xcb, 0x18, 0x36, 0xd7, 0x9a, + 0x16, 0xad, 0xb5, 0xbf, 0x8a, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xeb, 0x18, 0x36, 0xd7, + 0x9a, 0x16, 0xad, 0xb5, 0xbf, 0x89, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xdb, 0x18, 0x36, + 0xd7, 0x5a, 0x35, 0x5a, 0x6b, 0x7f, 0x17, 0xcd, 0xae, 0x72, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xf7, + 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x21, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, + 0x3f, 0xc6, 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0x51, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad, + 0xb5, 0x7f, 0x8a, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xcf, 0x31, 0x6c, 0xae, 0xb5, 0x6a, + 0xb4, 0xd6, 0xfe, 0x25, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc6, 0xb0, 0xb9, 0xd6, + 0xaa, 0xd1, 0x5a, 0xfb, 0xb7, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0x7f, 0x8f, 0x66, 0xeb, + 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x23, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0x3f, 0x63, + 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x2b, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0xbf, + 0x63, 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x27, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, + 0xc7, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0x3f, 0x89, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a, + 0xfb, 0xdf, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0xff, 0x8b, 0x61, 0x73, 0xad, 0xad, 0x47, + 0x6b, 0xed, 0xff, 0xa3, 0xd9, 0xeb, 0xab, 0x3f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x00, 0xcd, + 0x32, 0x57, 0x39, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.proto b/vendor/github.com/golang/protobuf/proto/testdata/test.proto index 3d1cbb675d..70e3cfcda2 100644 --- a/vendor/github.com/golang/protobuf/proto/testdata/test.proto +++ b/vendor/github.com/golang/protobuf/proto/testdata/test.proto @@ -169,6 +169,13 @@ message GoTest { }; } +// For testing a group containing a required field. +message GoTestRequiredGroupField { + required group Group = 1 { + required int32 Field = 2; + }; +} + // For testing skipping of unrecognized fields. // Numbers are all big, larger than tag numbers in GoTestField, // the message used in the corresponding test. 
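
The regenerated test.pb.go above registers two new extensions of OtherMessage, testdata.complex (E_Complex) and testdata.r_complex (E_RComplex), and the test.proto hunk below declares them together with an `extensions 100 to max` range. As a minimal sketch of how such a message-valued extension is exercised through the generic proto extension API — the First/Third field names and the import of the vendored testdata package are assumptions for illustration, not part of this patch:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "github.com/golang/protobuf/proto/testdata"
    )

    func main() {
        m := &testdata.OtherMessage{}

        // Attach the new message-valued extension (field 200) to OtherMessage.
        ext := &testdata.ComplexExtension{First: proto.Int32(7), Third: []int32{1, 2, 3}}
        if err := proto.SetExtension(m, testdata.E_Complex, ext); err != nil {
            panic(err)
        }

        // Read it back through the same extension descriptor.
        got, err := proto.GetExtension(m, testdata.E_Complex)
        if err != nil {
            panic(err)
        }
        fmt.Println(got.(*testdata.ComplexExtension).GetFirst()) // 7
    }
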
@@ -233,6 +240,12 @@ message OtherMessage { optional bytes value = 2; optional float weight = 3; optional InnerMessage inner = 4; + + extensions 100 to max; +} + +message RequiredInnerMessage { + required InnerMessage leo_finally_won_an_oscar = 1; } message MyMessage { @@ -242,6 +255,7 @@ message MyMessage { repeated string pet = 4; optional InnerMessage inner = 5; repeated OtherMessage others = 6; + optional RequiredInnerMessage we_must_go_deeper = 13; repeated InnerMessage rep_inner = 12; enum Color { @@ -277,6 +291,17 @@ extend MyMessage { repeated string greeting = 106; } +message ComplexExtension { + optional int32 first = 1; + optional int32 second = 2; + repeated int32 third = 3; +} + +extend OtherMessage { + optional ComplexExtension complex = 200; + repeated ComplexExtension r_complex = 201; +} + message DefaultsMessage { enum DefaultsEnum { ZERO = 0; @@ -470,6 +495,7 @@ message GroupNew { message FloatingPoint { required double f = 1; + optional bool exact = 2; } message MessageWithMap { @@ -479,6 +505,34 @@ message MessageWithMap { map str_to_str = 4; } +message Oneof { + oneof union { + bool F_Bool = 1; + int32 F_Int32 = 2; + int64 F_Int64 = 3; + fixed32 F_Fixed32 = 4; + fixed64 F_Fixed64 = 5; + uint32 F_Uint32 = 6; + uint64 F_Uint64 = 7; + float F_Float = 8; + double F_Double = 9; + string F_String = 10; + bytes F_Bytes = 11; + sint32 F_Sint32 = 12; + sint64 F_Sint64 = 13; + MyMessage.Color F_Enum = 14; + GoTestField F_Message = 15; + group F_Group = 16 { + optional int32 x = 17; + } + int32 F_Largest_Tag = 536870911; + } + + oneof tormato { + int32 value = 100; + } +} + message Communique { optional bool make_me_cry = 1; diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 81e1bdf386..965876bf03 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -154,7 +154,7 @@ func (w *textWriter) indent() { w.ind++ } func (w *textWriter) unindent() { if w.ind == 0 { - log.Printf("proto: textWriter unindented too far") + log.Print("proto: textWriter unindented too far") return } w.ind-- @@ -170,20 +170,98 @@ func writeName(w *textWriter, props *Properties) error { return nil } -var ( - messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() -) - // raw is the interface satisfied by RawMessage. type raw interface { Bytes() []byte } -func writeStruct(w *textWriter, sv reflect.Value) error { - if sv.Type() == messageSetType { - return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") } + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -235,7 +313,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } continue } - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -277,7 +355,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -294,7 +372,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -366,7 +444,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } // Enums have a String method, so writeAny will work fine. - if err := writeAny(w, fv, props); err != nil { + if err := tm.writeAny(w, fv, props); err != nil { return err } @@ -377,8 +455,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() - if pv.Type().Implements(extendableProtoType) { - if err := writeExtensions(w, pv); err != nil { + if _, ok := extendable(pv.Interface()); ok { + if err := tm.writeExtensions(w, pv); err != nil { return err } } @@ -408,7 +486,7 @@ func writeRaw(w *textWriter, b []byte) error { } // writeAny writes an arbitrary field. -func writeAny(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) // Floats have special cases. @@ -435,7 +513,7 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { switch v.Kind() { case reflect.Slice: // Should only be a []byte; repeated fields are handled in writeStruct. 
- if err := writeString(w, string(v.Interface().([]byte))); err != nil { + if err := writeString(w, string(v.Bytes())); err != nil { return err } case reflect.String: @@ -457,15 +535,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { } } w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } - } else if err := writeStruct(w, v); err != nil { + } else if err := tm.writeStruct(w, v); err != nil { return err } w.unindent() @@ -525,44 +603,6 @@ func writeString(w *textWriter, s string) error { return w.WriteByte('"') } -func writeMessageSet(w *textWriter, ms *MessageSet) error { - for _, item := range ms.Item { - id := *item.TypeId - if msd, ok := messageSetMap[id]; ok { - // Known message set type. - if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { - return err - } - w.indent() - - pb := reflect.New(msd.t.Elem()) - if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { - if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { - return err - } - } else { - if err := writeStruct(w, pb.Elem()); err != nil { - return err - } - } - } else { - // Unknown type. - if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { - return err - } - w.indent() - if err := writeUnknownStruct(w, item.Message); err != nil { - return err - } - } - w.unindent() - if _, err := w.Write(gtNewline); err != nil { - return err - } - } - return nil -} - func writeUnknownStruct(w *textWriter, data []byte) (err error) { if !w.compact { if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { @@ -647,19 +687,24 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] - ep := pv.Interface().(extendableProto) + ep, _ := extendable(pv.Interface()) // Order the extensions by ID. // This isn't strictly necessary, but it will give us // canonical output, which will also make testing easier. - m := ep.ExtensionMap() + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() ids := make([]int32, 0, len(m)) for id := range m { ids = append(ids, id) } sort.Sort(int32Slice(ids)) + mu.Unlock() for _, extNum := range ids { ext := m[extNum] @@ -682,13 +727,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { // Repeated extensions will appear as a slice. 
if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } @@ -697,7 +742,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { return nil } -func writeExtension(w *textWriter, name string, pb interface{}) error { +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } @@ -706,7 +751,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error { return err } } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -731,7 +776,15 @@ func (w *textWriter) writeIndent() { w.complete = false } -func marshalText(w io.Writer, pb Message, compact bool) error { +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) @@ -746,11 +799,11 @@ func marshalText(w io.Writer, pb Message, compact bool) error { aw := &textWriter{ w: ww, complete: true, - compact: compact, + compact: tm.Compact, } - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } @@ -764,7 +817,7 @@ func marshalText(w io.Writer, pb Message, compact bool) error { } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { + if err := tm.writeStruct(aw, v); err != nil { return err } if bw != nil { @@ -773,25 +826,29 @@ func marshalText(w io.Writer, pb Message, compact bool) error { return nil } +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + // MarshalText writes a given protocol buffer in text format. // The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { - return marshalText(w, pb, false) -} +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } // MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, false) - return buf.String() -} +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } // CompactText writes a given protocol buffer in compact text format (one line). 
-func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } // CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, true) - return buf.String() -} +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 6d0cf25894..61f83c1e10 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -44,6 +44,9 @@ import ( "unicode/utf8" ) +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + type ParseError struct { Message string Line int // 1-based line number @@ -119,6 +122,14 @@ func isWhitespace(c byte) bool { return false } +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + func (p *textParser) skipWhitespace() { i := 0 for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { @@ -155,7 +166,7 @@ func (p *textParser) advance() { p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',': + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': @@ -333,13 +344,13 @@ func (p *textParser) next() *token { p.advance() if p.done { p.cur.value = "" - } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { // Look for multiple quoted strings separated by whitespace, // and concatenate them. cat := p.cur for { p.skipWhitespace() - if p.done || p.s[0] != '"' { + if p.done || !isQuote(p.s[0]) { break } p.advance() @@ -443,7 +454,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be - // "[extension]". + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > for { tok := p.next() if tok.err != nil { @@ -453,33 +467,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { break } if tok.value == "[" { - // Looks like an extension. + // Looks like an extension or an Any. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). - tok = p.next() - if tok.err != nil { - return tok.err + extName, err := p.consumeExtName() + if err != nil { + return err } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
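A minimal usage sketch of the new proto.TextMarshaler introduced in text.go above (illustrative only, not taken from the upstream sources). It uses the Duration well-known type that this patch also vendors; the printed output shown in comments is approximate.

package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	d := &durpb.Duration{Seconds: 3, Nanos: 1}

	// The old package-level helpers remain and now delegate to a default
	// TextMarshaler, so existing callers are unaffected.
	fmt.Print(proto.MarshalTextString(d)) // roughly "seconds: 3\nnanos: 1\n"

	// The new configurable marshaler: Compact emits one line, and ExpandAny
	// expands google.protobuf.Any fields whose packed type is linked in.
	tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
	fmt.Println(tm.Text(d))      // roughly "seconds:3 nanos:1"
	_ = tm.Marshal(os.Stdout, d) // same content written to an io.Writer
}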
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { + if d.Name == extName { desc = d break } } if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) + return p.errorf("unrecognized extension %q", extName) } props := &Properties{} @@ -506,7 +561,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { } reqFieldErr = err } - ep := sv.Addr().Interface().(extendableProto) + ep := sv.Addr().Interface().(Message) if !rep { SetExtension(ep, desc, ext.Interface()) } else { @@ -537,7 +592,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { props = oop.Prop nv := reflect.New(oop.Type.Elem()) dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) } if !dst.IsValid() { return p.errorf("unknown field name %q in %v", name, st) @@ -558,8 +617,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { // The map entry should be this sequence of tokens: // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. 
tok := p.next() var terminator string @@ -571,32 +631,39 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { default: return p.errorf("expected '{' or '<', found %q", tok.value) } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } } dst.SetMapIndex(key, val) @@ -619,7 +686,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return err } reqFieldErr = err - } else if props.Required { + } + if props.Required { reqCount-- } @@ -635,6 +703,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. func (p *textParser) consumeOptionalSeparator() error { @@ -699,12 +796,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) return p.readAny(fv.Index(fv.Len()-1), props) case reflect.Bool: - // Either "true", "false", 1 or 0. + // true/1/t/True or false/f/0/False. 
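A small sketch of the relaxed text-parsing behaviour introduced here and exercised by the new test cases further down (t/True and f/False booleans, concatenation of mixed single- and double-quoted strings). Illustrative only; it uses the wrappers well-known types that this patch also vendors.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Booleans now accept t/True and f/False in addition to true/false/1/0.
	b := &wrappers.BoolValue{}
	if err := proto.UnmarshalText(`value: True`, b); err != nil {
		panic(err)
	}
	fmt.Println(b.Value) // true

	// Adjacent quoted strings are concatenated even when the quote styles
	// are mixed, mirroring the new "mixed quotes" test cases below.
	s := &wrappers.StringValue{}
	if err := proto.UnmarshalText(`value: "Hello, " 'world'`, s); err != nil {
		panic(err)
	}
	fmt.Println(s.Value) // Hello, world
}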
switch tok.value { - case "true", "1": + case "true", "1", "t", "True": fv.SetBool(true) return nil - case "false", "0": + case "false", "0", "f", "False": fv.SetBool(false) return nil } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go index d02c61d1fe..8f7cb4d274 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser_test.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser_test.go @@ -95,7 +95,7 @@ var unMarshalTextTests = []UnmarshalTextTest{ }, }, - // Quoted string concatenation + // Quoted string concatenation with double quotes { in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, out: &MyMessage{ @@ -104,6 +104,31 @@ var unMarshalTextTests = []UnmarshalTextTest{ }, }, + // Quoted string concatenation with single quotes + { + in: "count:42 name: 'My name is '\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenations with mixed quotes + { + in: "count:42 name: 'My name is '\n\"elsewhere\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + { + in: "count:42 name: \"My name is \"\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + // Quoted string with escaped apostrophe { in: `count:42 name: "HOLIDAY - New Year\'s Day"`, @@ -311,6 +336,16 @@ var unMarshalTextTests = []UnmarshalTextTest{ }, }, + // Missing required field in a required submessage + { + in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, + err: `proto: required field "testdata.InnerMessage.host" not set`, + out: &MyMessage{ + Count: Int32(42), + WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}}, + }, + }, + // Repeated non-repeated field { in: `name: "Rob" name: "Russ"`, @@ -345,6 +380,95 @@ var unMarshalTextTests = []UnmarshalTextTest{ }, }, + // Boolean false + { + in: `count:42 inner { host: "example.com" connected: false }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean true + { + in: `count:42 inner { host: "example.com" connected: true }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean 0 + { + in: `count:42 inner { host: "example.com" connected: 0 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean 1 + { + in: `count:42 inner { host: "example.com" connected: 1 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean f + { + in: `count:42 inner { host: "example.com" connected: f }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean t + { + in: `count:42 inner { host: "example.com" connected: t }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean False + { + in: `count:42 inner { host: "example.com" connected: False }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean True + { + in: `count:42 inner { host: 
"example.com" connected: True }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Extension buildExtStructTest(`count: 42 [testdata.Ext.more]:`), buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), @@ -473,7 +597,10 @@ func TestMapParsing(t *testing.T) { const in = `name_mapping: name_mapping:` + `msg_mapping:,>` + // separating commas are okay `msg_mapping>` + // no colon after "value" - `byte_mapping:` + `msg_mapping:>` + // omitted key + `msg_mapping:` + // omitted value + `byte_mapping:` + + `byte_mapping:<>` // omitted key and value want := &MessageWithMap{ NameMapping: map[int32]string{ 1: "Beatles", @@ -482,9 +609,12 @@ func TestMapParsing(t *testing.T) { MsgMapping: map[int64]*FloatingPoint{ -4: {F: Float64(2.0)}, -2: {F: Float64(4.0)}, + 0: {F: Float64(5.0)}, + 1: nil, }, ByteMapping: map[bool][]byte{ - true: []byte("so be it"), + false: nil, + true: []byte("so be it"), }, } if err := UnmarshalText(in, m); err != nil { @@ -505,6 +635,17 @@ func TestOneofParsing(t *testing.T) { if !Equal(m, want) { t.Errorf("\n got %v\nwant %v", m, want) } + + const inOverwrite = `name:"Shrek" number:42` + m = new(Communique) + testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'" + if err := UnmarshalText(inOverwrite, m); err == nil { + t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr) + } else if err.Error() != testErr { + t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v", + err.Error(), testErr) + } + } var benchInput string diff --git a/vendor/github.com/golang/protobuf/ptypes/LICENSE b/vendor/github.com/golang/protobuf/ptypes/LICENSE new file mode 100644 index 0000000000..1b1b1921ef --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 0000000000..89e07ae192 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,136 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 0000000000..1fbaa44caa --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,168 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/any/any.proto + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... 
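A sketch of the new ptypes helpers defined in any.go above, packing and unpacking the Duration well-known type (any registered proto.Message works the same way). Illustrative only, not from the upstream sources.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack: the type URL becomes "type.googleapis.com/google.protobuf.Duration".
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 3})
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl)

	// Inspect the packed type without unmarshaling it.
	fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true

	// Unpack into a known type...
	var d durpb.Duration
	if err := ptypes.UnmarshalAny(a, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Seconds) // 3

	// ...or let DynamicAny allocate the right type from the registry.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", dyn.Message) // *duration.Duration
}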
+// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 184 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, + 0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab, + 0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, + 0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/github.com/golang/protobuf/ptypes/doc.go similarity index 70% rename from vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto rename to vendor/github.com/golang/protobuf/ptypes/doc.go index e2311d9294..c0d595da7a 100644 --- a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -1,6 +1,6 @@ // Go support for Protocol Buffers - Google's data interchange format // -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2016 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without @@ -29,40 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -syntax = "proto3"; - -import "testdata/test.proto"; - -package proto3_proto; - -message Message { - enum Humour { - UNKNOWN = 0; - PUNS = 1; - SLAPSTICK = 2; - BILL_BAILEY = 3; - } - - string name = 1; - Humour hilarity = 2; - uint32 height_in_cm = 3; - bytes data = 4; - int64 result_count = 7; - bool true_scotsman = 8; - float score = 9; - - repeated uint64 key = 5; - Nested nested = 6; - - map terrain = 10; - testdata.SubDefaults proto2_field = 11; - map proto2_value = 13; -} - -message Nested { - string bunny = 1; -} - -message MessageWithMap { - map byte_mapping = 1; -} +/* +Package ptypes contains code for interacting with well-known types. 
+*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 0000000000..65cb0f8eb5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. 
+func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 0000000000..fe3350bed0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,146 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/duration/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. 
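A minimal sketch of the duration.go conversions above, round-tripping between time.Duration and the generated durpb.Duration (illustrative only).

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> google.protobuf.Duration
	p := ptypes.DurationProto(90 * time.Second)
	fmt.Println(p.Seconds, p.Nanos) // 90 0

	// google.protobuf.Duration -> time.Duration, with the range and sign
	// checks performed by validateDuration.
	d, err := ptypes.Duration(&durpb.Duration{Seconds: 3, Nanos: 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 3.000000001s
}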
+// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, + 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, + 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, + 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, + 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, + 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xfb, 0x83, 0x91, 0x71, 0x11, 0x13, + 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, + 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x45, 0x5a, 0x81, 0x3d, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 
100644 index 0000000000..ae15941446 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,68 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto + +/* +Package empty is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/empty/empty.proto + +It has these top-level messages: + Empty +*/ +package empty + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/empty/empty.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 147 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd, + 0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, + 0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, + 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2, + 0xce, 0x1f, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, + 0x0c, 0x80, 0xaa, 0xd3, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, + 0x4f, 0x62, 0x03, 0x1b, 0x60, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x8e, 0x0a, 0x06, 0xcf, + 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 0000000000..35a8ec5994 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,382 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/struct/struct.proto + +/* +Package structpb is a generated protocol buffer package. 
+ +It is generated from these files: + github.com/golang/protobuf/ptypes/struct/struct.proto + +It has these top-level messages: + Struct + Value + ListValue +*/ +package structpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
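The Value message above is a oneof-backed union; a short illustrative sketch of building a Struct and reading members back through the generated getters. The map keys are made up for the example, and the import alias follows the generated package name structpb.

package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"host":      {Kind: &structpb.Value_StringValue{StringValue: "example.com"}},
			"count":     {Kind: &structpb.Value_NumberValue{NumberValue: 42}},
			"connected": {Kind: &structpb.Value_BoolValue{BoolValue: true}},
		},
	}

	// Getters return the member if it is the one set in the oneof,
	// and the zero value otherwise.
	fmt.Println(s.Fields["host"].GetStringValue())  // example.com
	fmt.Println(s.Fields["count"].GetNumberValue()) // 42
	fmt.Println(s.Fields["host"].GetBoolValue())    // false: a different member is set
}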
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) 
+ n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/struct/struct.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0x80, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa0, 0xa1, 0x7b, 0x09, + 0x22, 0x09, 0x56, 0x04, 0x31, 0x5e, 0x0c, 0xac, 0xbb, 0x60, 0x58, 0x62, 0x74, 0x57, 0xf0, 0x52, + 0x9a, 0x34, 0x8d, 0xa1, 0xd3, 0x99, 0x90, 0xcc, 0x28, 0x3d, 0xfa, 0x2f, 0x3c, 0x7b, 0xf4, 0xe8, + 0xaf, 0xf3, 0x28, 0x33, 0x93, 0x44, 0x69, 0x29, 0x78, 0x9a, 0xbe, 0x37, 0xdf, 0xfb, 0xe6, 0xbd, + 0xd7, 0xc0, 0xf3, 0xb2, 0xe2, 0x9f, 0x45, 0xe6, 0xe7, 0x6c, 0x13, 0x94, 0x8c, 0x2c, 0x68, 0x19, + 0xd4, 0x0d, 0xe3, 0x2c, 0x13, 0xab, 0xa0, 0xe6, 0xdb, 0xba, 0x68, 0x83, 0x96, 0x37, 0x22, 0xe7, + 0xdd, 0xe1, 0xab, 0x5b, 0x7c, 0xa7, 0x64, 0xac, 0x24, 0x85, 0xdf, 0xb3, 0xd3, 0xef, 0x08, 0xac, + 0xf7, 0x8a, 0xc0, 0x21, 0x58, 0xab, 0xaa, 0x20, 0xcb, 0x76, 0x82, 0x5c, 0xd3, 0x73, 0x66, 0x67, + 0xfe, 0x0e, 0xec, 0x6b, 0xd0, 0x7f, 0xa3, 0xa8, 0x73, 0xca, 0x9b, 0x6d, 0xda, 0x95, 0x9c, 0xbe, + 0x03, 0xe7, 0x9f, 0x34, 0x3e, 0x01, 0x73, 0x5d, 0x6c, 0x27, 0xc8, 0x45, 0x9e, 0x9d, 0xca, 0x9f, + 0xf8, 0x09, 0x8c, 0xbf, 0x2c, 0x88, 0x28, 0x26, 0x86, 0x8b, 0x3c, 0x67, 0x76, 0x6f, 0x4f, 0x7e, + 0x23, 0x6f, 0x53, 0x0d, 0xbd, 0x34, 0x5e, 0xa0, 0xe9, 0x2f, 0x03, 0xc6, 0x2a, 0x89, 0x43, 0x00, + 0x2a, 0x08, 0x99, 0x6b, 0x81, 0x94, 0x1e, 0xcf, 0x4e, 0xf7, 0x04, 0x57, 0x82, 0x10, 0xc5, 0x5f, + 0x8e, 0x52, 0x9b, 0xf6, 0x01, 0x3e, 0x83, 0xdb, 0x54, 0x6c, 0xb2, 0xa2, 0x99, 0xff, 0x7d, 0x1f, + 0x5d, 0x8e, 0x52, 0x47, 0x67, 0x07, 0xa8, 0xe5, 0x4d, 0x45, 0xcb, 0x0e, 0x32, 0x65, 0xe3, 0x12, + 0xd2, 0x59, 0x0d, 0x3d, 0x02, 0xc8, 0x18, 0xeb, 0xdb, 0x38, 0x72, 0x91, 0x77, 0x4b, 0x3e, 0x25, + 0x73, 0x1a, 0x78, 0xa5, 0x2c, 0x22, 0xe7, 0x1d, 0x32, 0x56, 0xa3, 0xde, 0x3f, 0xb0, 0xc7, 0x4e, + 0x2f, 0x72, 0x3e, 0x4c, 0x49, 0xaa, 0xb6, 0xaf, 0xb5, 0x54, 0xed, 0xfe, 0x94, 0x71, 0xd5, 0xf2, + 0x61, 0x4a, 0xd2, 0x07, 0x91, 0x05, 0x47, 0xeb, 0x8a, 0x2e, 0xa7, 0x21, 0xd8, 0x03, 0x81, 0x7d, + 0xb0, 0x94, 0xac, 0xff, 0x47, 0x0f, 0x2d, 0xbd, 0xa3, 0x1e, 0x3f, 0x00, 0x7b, 0x58, 0x22, 0x3e, + 0x06, 0xb8, 0xba, 0x8e, 0xe3, 0xf9, 
0xcd, 0xeb, 0xf8, 0xfa, 0xfc, 0x64, 0x14, 0x7d, 0x43, 0x70, + 0x37, 0x67, 0x9b, 0x5d, 0x45, 0xe4, 0xe8, 0x69, 0x12, 0x19, 0x27, 0xe8, 0xd3, 0xd3, 0xff, 0xfd, + 0x30, 0x43, 0x7d, 0xd4, 0xd9, 0x6f, 0x84, 0x7e, 0x18, 0xe6, 0x45, 0x12, 0xfd, 0x34, 0x1e, 0x5e, + 0x68, 0x79, 0xd2, 0xf7, 0xf7, 0xb1, 0x20, 0xe4, 0x2d, 0x65, 0x5f, 0xe9, 0x07, 0x59, 0x99, 0x59, + 0x4a, 0xf5, 0xec, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x6e, 0x5d, 0x3c, 0xfe, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 0000000000..1b36576220 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
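+// For instance (an illustrative sketch using the TimestampProto helper defined
+// later in this file; the specific date is only an example): a time.Time far
+// outside the valid range is still a legitimate time.Time, but it cannot be
+// converted to a valid Timestamp:
+//
+//     t := time.Date(20000, 1, 1, 0, 0, 0, 0, time.UTC) // fine as a time.Time
+//     _, err := TimestampProto(t)                       // err != nil: after 10000-01-01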
+func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 0000000000..3b76261ec5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,162 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. 
Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, + 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x8e, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x4c, 0x27, 0x3e, + 0xb8, 0x89, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0xdc, 0xfc, 0x83, 0x91, 0x71, 0x11, + 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, 0xe1, + 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, 0x8c, + 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x59, 0x0a, 0x4d, 0x13, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 0000000000..1328bd296a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,262 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto + +/* +Package wrappers is a generated protocol buffer package. 
+ +It is generated from these files: + github.com/golang/protobuf/ptypes/wrappers/wrappers.proto + +It has these top-level messages: + DoubleValue + FloatValue + Int64Value + UInt64Value + Int32Value + UInt32Value + BoolValue + StringValue + BytesValue +*/ +package wrappers + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. 
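+// As an illustrative note: because each wrapper type is a message, a nil
+// pointer can represent "no value", which a plain proto3 scalar field cannot
+// distinguish from its zero value:
+//
+//     var absent *Int32Value          // field not set
+//     zero := &Int32Value{Value: 0}   // field explicitly set to 0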
+type Int32Value struct { + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/wrappers/wrappers.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 257 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f, + 0x4a, 0x2c, 0x28, 0x48, 0x2d, 0x42, 0x30, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3, + 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3, + 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26, + 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, 0x71, 0x87, + 0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, 0x55, 0xc4, + 0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, 0x39, 0xc1, + 0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, 0xb5, 0x18, + 0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x1a, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0xb4, 0xd0, 0x75, + 0xe2, 0x0d, 0x87, 0x06, 0x7f, 0x00, 0x48, 0x24, 0x80, 0x31, 0x4a, 0x8b, 0xf8, 0xa8, 0xfb, 0xc1, + 0xc8, 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0xc4, 0xdc, 0x00, 0xa8, + 0x52, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, 0x96, 0x24, 0x36, + 0xb0, 0x19, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xee, 0x36, 0x8d, 0xd8, 0x19, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/weaveworks/common/backoff/backoff.go b/vendor/github.com/weaveworks/common/backoff/backoff.go index 94f5ad6fbf..01cba44090 100644 --- a/vendor/github.com/weaveworks/common/backoff/backoff.go +++ b/vendor/github.com/weaveworks/common/backoff/backoff.go @@ -17,7 +17,7 @@ type backoff struct { // each iterations. If it hits an error, it exponentially backs // off to maxBackoff. Backoff will log when it backs off, but // will stop logging when it reaches maxBackoff. It will also -// log on first success. +// log on first success in the beginning and after errors. 
type Interface interface { Start() Stop() @@ -65,12 +65,13 @@ func (b *backoff) Start() { if err != nil { backoff *= 2 + shouldLog = true if backoff > b.maxBackoff { backoff = b.maxBackoff + shouldLog = false } - } else if backoff > b.initialBackoff { + } else { backoff = b.initialBackoff - shouldLog = true } if shouldLog { @@ -82,9 +83,9 @@ func (b *backoff) Start() { } } - if backoff >= b.maxBackoff || err == nil { - shouldLog = false - } + // Re-enable logging if we came from an error (suppressed or not) + // since we want to log in case a success follows. + shouldLog = err != nil select { case <-time.After(backoff): diff --git a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.go b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.go index 87d612390b..41600c2db1 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.go +++ b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.go @@ -1,179 +1,56 @@ package httpgrpc import ( - "bytes" "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "strings" - "sync" - "time" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/mwitkow/go-grpc-middleware" - "github.com/opentracing/opentracing-go" - "github.com/sercand/kuberesolver" - "golang.org/x/net/context" - "google.golang.org/grpc" - - "github.com/weaveworks/common/middleware" + log "github.com/Sirupsen/logrus" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/status" ) -const dialTimeout = 5 * time.Second - -// Server implements HTTPServer. HTTPServer is a generated interface that gRPC -// servers must implement. -type Server struct { - handler http.Handler -} - -// NewServer makes a new Server. -func NewServer(handler http.Handler) *Server { - return &Server{ - handler: handler, - } -} - -// Handle implements HTTPServer. -func (s Server) Handle(ctx context.Context, r *HTTPRequest) (*HTTPResponse, error) { - req, err := http.NewRequest(r.Method, r.Url, ioutil.NopCloser(bytes.NewReader(r.Body))) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - toHeader(r.Headers, req.Header) - req.RequestURI = r.Url - recorder := httptest.NewRecorder() - s.handler.ServeHTTP(recorder, req) - resp := &HTTPResponse{ - Code: int32(recorder.Code), - Headers: fromHeader(recorder.Header()), - Body: recorder.Body.Bytes(), - } - return resp, nil -} - -// Client is a http.Handler that forwards the request over gRPC. -type Client struct { - mtx sync.RWMutex - service string - namespace string - port string - client HTTPClient - conn *grpc.ClientConn -} - -// ParseKubernetesAddress splits up an address of the form (.): -// into its consistuent parts. Namespace will be "default" if missing. -func ParseKubernetesAddress(address string) (service, namespace, port string, err error) { - host, port, err := net.SplitHostPort(address) - if err != nil { - return "", "", "", err - } - parts := strings.SplitN(host, ".", 2) - service, namespace = parts[0], "default" - if len(parts) == 2 { - namespace = parts[1] - } - return service, namespace, port, nil -} - -// NewClient makes a new Client, given a kubernetes service address. 
-func NewClient(address string) (*Client, error) { - service, namespace, port, err := ParseKubernetesAddress(address) - if err != nil { - return nil, err - } - return &Client{ - service: service, - namespace: namespace, - port: port, - }, nil +// Errorf returns a HTTP gRPC error than is correctly forwarded over +// gRPC, and can eventually be converted back to a HTTP response with +// HTTPResponseFromError. +func Errorf(code int, tmpl string, args ...interface{}) error { + return ErrorFromHTTPResponse(&HTTPResponse{ + Code: int32(code), + Body: []byte(fmt.Sprintf(tmpl, args...)), + }) } -func (c *Client) connect(ctx context.Context) error { - c.mtx.RLock() - connected := c.conn != nil - c.mtx.RUnlock() - if connected { - return nil - } - - c.mtx.Lock() - defer c.mtx.Unlock() - if c.conn != nil { - return nil - } - - balancer := kuberesolver.NewWithNamespace(c.namespace) - ctxDeadline, cancel := context.WithTimeout(ctx, dialTimeout) - defer cancel() - conn, err := grpc.DialContext( - ctxDeadline, - fmt.Sprintf("kubernetes://%s:%s", c.service, c.port), - balancer.DialOption(), - grpc.WithInsecure(), - grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient( - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), - middleware.ClientUserHeaderInterceptor, - )), - ) +// ErrorFromHTTPResponse converts an HTTP response into a grpc error +func ErrorFromHTTPResponse(resp *HTTPResponse) error { + a, err := ptypes.MarshalAny(resp) if err != nil { return err } - c.client = NewHTTPClient(conn) - c.conn = conn - return nil -} - -// ServeHTTP implements http.Handler -func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if err := c.connect(r.Context()); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - body, err := ioutil.ReadAll(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - req := &HTTPRequest{ - Method: r.Method, - Url: r.RequestURI, - Body: body, - Headers: fromHeader(r.Header), - } + return status.ErrorProto(&spb.Status{ + Code: resp.Code, + Message: string(resp.Body), + Details: []*any.Any{a}, + }) +} - resp, err := c.client.Handle(r.Context(), req) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return +// HTTPResponseFromError converts a grpc error into an HTTP response +func HTTPResponseFromError(err error) (*HTTPResponse, bool) { + s, ok := status.FromError(err) + if !ok { + return nil, false } - toHeader(resp.Headers, w.Header()) - w.WriteHeader(int(resp.Code)) - if _, err := w.Write(resp.Body); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + status := s.Proto() + if len(status.Details) != 1 { + return nil, false } -} -func toHeader(hs []*Header, header http.Header) { - for _, h := range hs { - header[h.Key] = h.Values + var resp HTTPResponse + if err := ptypes.UnmarshalAny(status.Details[0], &resp); err != nil { + log.Errorf("Got error containing non-response: %v", err) + return nil, false } -} -func fromHeader(hs http.Header) []*Header { - result := make([]*Header, 0, len(hs)) - for k, vs := range hs { - result = append(result, &Header{ - Key: k, - Values: vs, - }) - } - return result + return &resp, true } diff --git a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go index 02b2a47869..8b498ced0e 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go +++ 
b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: httpgrpc.proto -// DO NOT EDIT! /* Package httpgrpc is a generated protocol buffer package. @@ -213,7 +212,7 @@ func init() { proto.RegisterFile("httpgrpc.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 231 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x90, 0x31, 0x4f, 0xc3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0x31, 0x4f, 0xc3, 0x30, 0x10, 0x85, 0x49, 0x1d, 0x0c, 0xbd, 0x56, 0xa8, 0x3a, 0x89, 0xca, 0x62, 0x8a, 0x32, 0x45, 0x0c, 0x1d, 0xc2, 0xc4, 0x88, 0x58, 0x32, 0x22, 0xab, 0x7f, 0x20, 0xc1, 0x27, 0x22, 0x11, 0x6a, 0x63, 0x3b, 0xa0, 0xfe, 0x7b, 0x64, 0x3b, 0x85, 0x88, 0xa9, 0xdb, 0x7b, 0xe7, 0x27, 0x7f, 0xf7, 0x0e, diff --git a/vendor/github.com/weaveworks/common/httpgrpc/server/server.go b/vendor/github.com/weaveworks/common/httpgrpc/server/server.go new file mode 100644 index 0000000000..0ccad0a695 --- /dev/null +++ b/vendor/github.com/weaveworks/common/httpgrpc/server/server.go @@ -0,0 +1,183 @@ +package server + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync" + + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + "github.com/mwitkow/go-grpc-middleware" + "github.com/opentracing/opentracing-go" + "github.com/sercand/kuberesolver" + "golang.org/x/net/context" + "google.golang.org/grpc" + + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/middleware" +) + +// Server implements HTTPServer. HTTPServer is a generated interface that gRPC +// servers must implement. +type Server struct { + handler http.Handler +} + +// NewServer makes a new Server. +func NewServer(handler http.Handler) *Server { + return &Server{ + handler: handler, + } +} + +// Handle implements HTTPServer. +func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { + req, err := http.NewRequest(r.Method, r.Url, ioutil.NopCloser(bytes.NewReader(r.Body))) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + toHeader(r.Headers, req.Header) + req.RequestURI = r.Url + recorder := httptest.NewRecorder() + s.handler.ServeHTTP(recorder, req) + resp := &httpgrpc.HTTPResponse{ + Code: int32(recorder.Code), + Headers: fromHeader(recorder.Header()), + Body: recorder.Body.Bytes(), + } + if recorder.Code/100 == 5 { + return nil, httpgrpc.ErrorFromHTTPResponse(resp) + } + return resp, err +} + +// Client is a http.Handler that forwards the request over gRPC. +type Client struct { + mtx sync.RWMutex + service string + namespace string + port string + client httpgrpc.HTTPClient + conn *grpc.ClientConn +} + +// ParseURL deals with direct:// style URLs, as well as kubernetes:// urls. +// For backwards compatibility it treats URLs without schems as kubernetes://. 
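+// For example (illustrative addresses, showing the cases handled below):
+//
+//     ParseURL("direct://localhost:4100")            // dialed as-is, no extra dial options
+//     ParseURL("kubernetes://frontend.default:4100") // service "frontend" in namespace "default", via kuberesolver
+//     ParseURL("frontend.default:4100")              // no scheme, treated as kubernetes://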
+func ParseURL(unparsed string) (string, []grpc.DialOption, error) { + parsed, err := url.Parse(unparsed) + if err != nil { + return "", nil, err + } + + scheme, host := parsed.Scheme, parsed.Host + if !strings.Contains(unparsed, "://") { + scheme, host = "kubernetes", unparsed + } + + switch scheme { + case "direct": + return host, nil, err + + case "kubernetes": + host, port, err := net.SplitHostPort(host) + if err != nil { + return "", nil, err + } + parts := strings.SplitN(host, ".", 2) + service, namespace := parts[0], "default" + if len(parts) == 2 { + namespace = parts[1] + } + balancer := kuberesolver.NewWithNamespace(namespace) + address := fmt.Sprintf("kubernetes://%s:%s", service, port) + dialOptions := []grpc.DialOption{balancer.DialOption()} + return address, dialOptions, nil + + default: + return "", nil, fmt.Errorf("unrecognised scheme: %s", parsed.Scheme) + } +} + +// NewClient makes a new Client, given a kubernetes service address. +func NewClient(address string) (*Client, error) { + address, dialOptions, err := ParseURL(address) + if err != nil { + return nil, err + } + + dialOptions = append( + dialOptions, + grpc.WithInsecure(), + grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient( + otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), + middleware.ClientUserHeaderInterceptor, + )), + ) + + conn, err := grpc.Dial(address, dialOptions...) + if err != nil { + return nil, err + } + + return &Client{ + client: httpgrpc.NewHTTPClient(conn), + conn: conn, + }, nil +} + +// ServeHTTP implements http.Handler +func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + req := &httpgrpc.HTTPRequest{ + Method: r.Method, + Url: r.RequestURI, + Body: body, + Headers: fromHeader(r.Header), + } + + resp, err := c.client.Handle(r.Context(), req) + if err != nil { + // Some errors will actually contain a valid resp, just need to unpack it + var ok bool + resp, ok = httpgrpc.HTTPResponseFromError(err) + + if !ok { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + toHeader(resp.Headers, w.Header()) + w.WriteHeader(int(resp.Code)) + if _, err := w.Write(resp.Body); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +func toHeader(hs []*httpgrpc.Header, header http.Header) { + for _, h := range hs { + header[h.Key] = h.Values + } +} + +func fromHeader(hs http.Header) []*httpgrpc.Header { + result := make([]*httpgrpc.Header, 0, len(hs)) + for k, vs := range hs { + result = append(result, &httpgrpc.Header{ + Key: k, + Values: vs, + }) + } + return result +} diff --git a/vendor/github.com/weaveworks/common/logging/logging.go b/vendor/github.com/weaveworks/common/logging/logging.go index 296bee12f4..034b51aa47 100644 --- a/vendor/github.com/weaveworks/common/logging/logging.go +++ b/vendor/github.com/weaveworks/common/logging/logging.go @@ -6,7 +6,10 @@ import ( "os" "strings" + "golang.org/x/net/context" + log "github.com/Sirupsen/logrus" + "github.com/weaveworks/common/user" ) // Setup configures logging output to stderr, sets the log level and sets the formatter. @@ -43,3 +46,12 @@ func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) { b.WriteByte('\n') return b.Bytes(), nil } + +// With returns a log entry with common Weaveworks logging information. +// +// e.g. 
+// logger := logging.With(ctx) +// logger.Errorf("Some error") +func With(ctx context.Context) *log.Entry { + return log.WithFields(user.LogFields(ctx)) +} diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go b/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go index 3d002be894..0614433448 100644 --- a/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go +++ b/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go @@ -1,23 +1,30 @@ package middleware import ( + "strconv" "time" "github.com/prometheus/client_golang/prometheus" + "github.com/weaveworks/common/httpgrpc" "golang.org/x/net/context" "google.golang.org/grpc" ) // ServerInstrumentInterceptor instruments gRPC requests for errors and latency. -func ServerInstrumentInterceptor(duration *prometheus.HistogramVec) grpc.UnaryServerInterceptor { +func ServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { begin := time.Now() resp, err := handler(ctx, req) - status := "success" + duration := time.Since(begin).Seconds() + respStatus := "success" if err != nil { - status = "error" + if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { + respStatus = strconv.Itoa(int(errResp.Code)) + } else { + respStatus = "error" + } } - duration.WithLabelValues(gRPC, info.FullMethod, status, "false").Observe(time.Since(begin).Seconds()) + hist.WithLabelValues(gRPC, info.FullMethod, respStatus, "false").Observe(duration) return resp, err } } diff --git a/vendor/github.com/weaveworks/common/middleware/path_rewrite.go b/vendor/github.com/weaveworks/common/middleware/path_rewrite.go index 65eb2e9f24..064cbacfe7 100644 --- a/vendor/github.com/weaveworks/common/middleware/path_rewrite.go +++ b/vendor/github.com/weaveworks/common/middleware/path_rewrite.go @@ -2,7 +2,10 @@ package middleware import ( "net/http" + "net/url" "regexp" + + log "github.com/Sirupsen/logrus" ) // PathRewrite supports regex matching and replace on Request URIs @@ -21,7 +24,14 @@ type pathRewrite struct { func (p pathRewrite) Wrap(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r.RequestURI = p.regexp.ReplaceAllString(r.RequestURI, p.replacement) - r.URL.Path = p.regexp.ReplaceAllString(r.URL.Path, p.replacement) + r.URL.RawPath = p.regexp.ReplaceAllString(r.URL.EscapedPath(), p.replacement) + path, err := url.PathUnescape(r.URL.RawPath) + if err != nil { + log.Errorf("Got invalid url-encoded path %v after applying path rewrite %v: %v", r.URL.RawPath, p, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + r.URL.Path = path next.ServeHTTP(w, r) }) } diff --git a/vendor/github.com/weaveworks/common/server/fake_server.pb.go b/vendor/github.com/weaveworks/common/server/fake_server.pb.go new file mode 100644 index 0000000000..ab3563a0c8 --- /dev/null +++ b/vendor/github.com/weaveworks/common/server/fake_server.pb.go @@ -0,0 +1,210 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: fake_server.proto + +/* +Package server is a generated protocol buffer package. 
+ +It is generated from these files: + fake_server.proto + +It has these top-level messages: + FailWithHTTPErrorRequest +*/ +package server + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FailWithHTTPErrorRequest struct { + Code int32 `protobuf:"varint,1,opt,name=Code" json:"Code,omitempty"` +} + +func (m *FailWithHTTPErrorRequest) Reset() { *m = FailWithHTTPErrorRequest{} } +func (m *FailWithHTTPErrorRequest) String() string { return proto.CompactTextString(m) } +func (*FailWithHTTPErrorRequest) ProtoMessage() {} +func (*FailWithHTTPErrorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FailWithHTTPErrorRequest) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func init() { + proto.RegisterType((*FailWithHTTPErrorRequest)(nil), "server.FailWithHTTPErrorRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for FakeServer service + +type FakeServerClient interface { + Succeed(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + FailWithError(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + FailWithHTTPError(ctx context.Context, in *FailWithHTTPErrorRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) +} + +type fakeServerClient struct { + cc *grpc.ClientConn +} + +func NewFakeServerClient(cc *grpc.ClientConn) FakeServerClient { + return &fakeServerClient{cc} +} + +func (c *fakeServerClient) Succeed(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/server.FakeServer/Succeed", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *fakeServerClient) FailWithError(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/server.FakeServer/FailWithError", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *fakeServerClient) FailWithHTTPError(ctx context.Context, in *FailWithHTTPErrorRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/server.FakeServer/FailWithHTTPError", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for FakeServer service + +type FakeServerServer interface { + Succeed(context.Context, *google_protobuf.Empty) (*google_protobuf.Empty, error) + FailWithError(context.Context, *google_protobuf.Empty) (*google_protobuf.Empty, error) + FailWithHTTPError(context.Context, *FailWithHTTPErrorRequest) (*google_protobuf.Empty, error) +} + +func RegisterFakeServerServer(s *grpc.Server, srv FakeServerServer) { + s.RegisterService(&_FakeServer_serviceDesc, srv) +} + +func _FakeServer_Succeed_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_protobuf.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FakeServerServer).Succeed(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/server.FakeServer/Succeed", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FakeServerServer).Succeed(ctx, req.(*google_protobuf.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _FakeServer_FailWithError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_protobuf.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FakeServerServer).FailWithError(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/server.FakeServer/FailWithError", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FakeServerServer).FailWithError(ctx, req.(*google_protobuf.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _FakeServer_FailWithHTTPError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FailWithHTTPErrorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FakeServerServer).FailWithHTTPError(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/server.FakeServer/FailWithHTTPError", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FakeServerServer).FailWithHTTPError(ctx, req.(*FailWithHTTPErrorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _FakeServer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "server.FakeServer", + HandlerType: (*FakeServerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Succeed", + Handler: _FakeServer_Succeed_Handler, + }, + { + MethodName: "FailWithError", + Handler: _FakeServer_FailWithError_Handler, + }, + { + MethodName: "FailWithHTTPError", + Handler: _FakeServer_FailWithHTTPError_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "fake_server.proto", +} + +func init() { proto.RegisterFile("fake_server.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x4b, 0xcc, 0x4e, + 0x8d, 0x2f, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, + 0xf0, 0xa4, 0xa4, 0xd3, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xc1, 0xa2, 0x49, 0xa5, 0x69, 0xfa, + 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x10, 0x45, 0x4a, 0x7a, 0x5c, 0x12, 0x6e, 0x89, 0x99, 0x39, 
0xe1, + 0x99, 0x25, 0x19, 0x1e, 0x21, 0x21, 0x01, 0xae, 0x45, 0x45, 0xf9, 0x45, 0x41, 0xa9, 0x85, 0xa5, + 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0xce, 0xf9, 0x29, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, + 0xac, 0x41, 0x60, 0xb6, 0xd1, 0x5d, 0x46, 0x2e, 0x2e, 0xb7, 0xc4, 0xec, 0xd4, 0x60, 0xb0, 0xd9, + 0x42, 0xd6, 0x5c, 0xec, 0xc1, 0xa5, 0xc9, 0xc9, 0xa9, 0xa9, 0x29, 0x42, 0x62, 0x7a, 0x10, 0x7b, + 0xf4, 0x60, 0xf6, 0xe8, 0xb9, 0x82, 0xec, 0x91, 0xc2, 0x21, 0xae, 0xc4, 0x20, 0xe4, 0xc8, 0xc5, + 0x0b, 0xb3, 0x1b, 0x6c, 0x2f, 0x19, 0x46, 0xf8, 0x73, 0x09, 0x62, 0x38, 0x5f, 0x48, 0x41, 0x0f, + 0x1a, 0x0e, 0xb8, 0x7c, 0x86, 0xdb, 0xc0, 0x24, 0x36, 0xb0, 0x88, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0xdc, 0x61, 0xa8, 0xf0, 0x50, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go index 6f9a11b1ce..6de17b4585 100644 --- a/vendor/github.com/weaveworks/common/server/server.go +++ b/vendor/github.com/weaveworks/common/server/server.go @@ -20,6 +20,7 @@ import ( "github.com/weaveworks-experiments/loki/pkg/client" "github.com/weaveworks/common/httpgrpc" + httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/signals" ) @@ -149,7 +150,7 @@ func (s *Server) Run() { // Setup gRPC server // for HTTP over gRPC, ensure we don't double-count the middleware - httpgrpc.RegisterHTTPServer(s.GRPC, httpgrpc.NewServer(s.HTTP)) + httpgrpc.RegisterHTTPServer(s.GRPC, httpgrpc_server.NewServer(s.HTTP)) go s.GRPC.Serve(s.grpcListener) defer s.GRPC.GracefulStop() @@ -165,7 +166,7 @@ func (s *Server) Stop() { // Shutdown the server, gracefully. Should be defered after New(). func (s *Server) Shutdown() { ctx, cancel := context.WithTimeout(context.Background(), s.cfg.ServerGracefulShutdownTimeout) - defer cancel() // releases resources if httpServer.Churdown completes before timeout elapses + defer cancel() // releases resources if httpServer.Shutdown completes before timeout elapses s.httpServer.Shutdown(ctx) s.GRPC.Stop() diff --git a/vendor/github.com/weaveworks/common/user/logging.go b/vendor/github.com/weaveworks/common/user/logging.go new file mode 100644 index 0000000000..1180f1fdfb --- /dev/null +++ b/vendor/github.com/weaveworks/common/user/logging.go @@ -0,0 +1,21 @@ +package user + +import ( + "golang.org/x/net/context" + + log "github.com/Sirupsen/logrus" +) + +// LogFields returns user and org information from the context as log fields. +func LogFields(ctx context.Context) log.Fields { + fields := log.Fields{} + userID, err := ExtractUserID(ctx) + if err != nil { + fields["userID"] = userID + } + orgID, err := ExtractOrgID(ctx) + if err != nil { + fields["orgID"] = orgID + } + return fields +} diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go new file mode 100644 index 0000000000..698860b777 --- /dev/null +++ b/vendor/golang.org/x/net/http2/ciphers.go @@ -0,0 +1,641 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +// A list of the possible cipher suite ids. 
Taken from +// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + 
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 
= 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 
0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + 
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 
0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go index 4f720f530b..b65fc6d423 100644 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) { } // registerHTTPSProtocol calls Transport.RegisterProtocol but -// convering panics into errors. +// converting panics into errors. func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { defer func() { if e := recover(); e != nil { diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 0000000000..a3067f8de7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. 
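As an aside on the ciphers.go file above: it gives the vendored http2 package its own copy of the IANA cipher-suite registry so that isBadCipher can implement the full RFC 7540 Appendix A blacklist, replacing the short crypto/tls-based list deleted from go16.go further down. The sketch below is not part of the patch; it illustrates the same idea against a tls.Config using only standard-library constants, and the appendixA sample table and rejectBadCiphers helper are hypothetical names of ours.

package main

import (
	"crypto/tls"
	"fmt"
)

// appendixA holds a small sample of the RFC 7540 Appendix A blacklist,
// spelled with crypto/tls constants. The vendored ciphers.go keeps a far
// larger table because many registered suites have no stdlib constant.
var appendixA = map[uint16]bool{
	tls.TLS_RSA_WITH_RC4_128_SHA:        true,
	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:   true,
	tls.TLS_RSA_WITH_AES_128_CBC_SHA:    true,
	tls.TLS_RSA_WITH_AES_256_CBC_SHA:    true,
	tls.TLS_RSA_WITH_AES_128_GCM_SHA256: true,
	tls.TLS_RSA_WITH_AES_256_GCM_SHA384: true,
}

// rejectBadCiphers (hypothetical helper) reports which configured suites
// an HTTP/2 endpoint following Appendix A would refuse.
func rejectBadCiphers(cfg *tls.Config) []uint16 {
	var bad []uint16
	for _, c := range cfg.CipherSuites {
		if appendixA[c] {
			bad = append(bad, c)
		}
	}
	return bad
}

func main() {
	cfg := &tls.Config{CipherSuites: []uint16{
		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // acceptable for HTTP/2
		tls.TLS_RSA_WITH_AES_128_CBC_SHA,          // Appendix A: no ephemeral key exchange
	}}
	fmt.Printf("blacklisted suites: %#04x\n", rejectBadCiphers(cfg))
}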
The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. + if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). 
+ want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0bf7..0000000000 --- a/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 957358837c..3b14890728 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{ // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) +type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, @@ -323,6 +323,8 @@ type Framer struct { debugFramerBuf *bytes.Buffer debugReadLoggerf func(string, ...interface{}) debugWriteLoggerf func(string, ...interface{}) + + frameCache *frameCache // nil if frames aren't reused (default) } func (fr *Framer) maxHeaderListSize() uint32 { @@ -398,6 +400,27 @@ const ( maxFrameSize = 1<<24 - 1 ) +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. 
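SetReuseFrames is new in this copy of the package: once enabled, the Framer recycles its DataFrame value between reads, so a payload that must outlive the next ReadFrame has to be copied first. A minimal usage sketch of ours (not from the patch), using only the golang.org/x/net/http2 API shown here:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var wire bytes.Buffer

	// Write one DATA frame so the reader below has something to parse.
	w := http2.NewFramer(&wire, nil)
	if err := w.WriteData(1, true, []byte("hello")); err != nil {
		log.Fatal(err)
	}

	r := http2.NewFramer(nil, &wire)
	r.SetReuseFrames() // DataFrames are now recycled between ReadFrame calls

	for {
		f, err := r.ReadFrame()
		if err != nil {
			break // io.EOF once the buffer is drained
		}
		if df, ok := f.(*http2.DataFrame); ok {
			// The frame (and its Data) is only valid until the next
			// ReadFrame, so copy anything that needs to be kept.
			payload := append([]byte(nil), df.Data()...)
			fmt.Printf("stream %d: %q\n", df.StreamID, payload)
		}
	}
}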
+func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + // NewFramer returns a Framer that writes frames to w and reads them from r. func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ @@ -477,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } - f, err := typeFrameParser(fh.Type)(fh, payload) + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) if err != nil { if ce, ok := err.(connError); ok { return nil, fr.connError(ce.Code, ce.Reason) @@ -565,7 +588,7 @@ func (f *DataFrame) Data() []byte { return f.data } -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier @@ -574,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // PROTOCOL_ERROR. return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } - f := &DataFrame{ - FrameHeader: fh, - } + f := fc.getDataFrame() + f.FrameHeader = fh + var padSize byte if fh.Flags.Has(FlagDataPadded) { var err error @@ -672,7 +695,7 @@ type SettingsFrame struct { p []byte } -func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the // SETTINGS frame MUST be empty. 
Receipt of a @@ -774,7 +797,7 @@ type PingFrame struct { func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } -func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if len(payload) != 8 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -814,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte { return f.debugData } -func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } @@ -854,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte { return f.p } -func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } @@ -865,7 +888,7 @@ type WindowUpdateFrame struct { Increment uint32 // never read with high bit set } -func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -930,7 +953,7 @@ func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } -func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { hf := &HeadersFrame{ FrameHeader: fh, } @@ -1067,7 +1090,7 @@ func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } -func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } @@ -1114,7 +1137,7 @@ type RSTStreamFrame struct { ErrCode ErrCode } -func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -1144,7 +1167,7 @@ type ContinuationFrame struct { headerFragBuf []byte } -func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } @@ -1194,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } -func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go index 2b72855f55..00b2e9e3cf 100644 --- a/vendor/golang.org/x/net/http2/go16.go +++ b/vendor/golang.org/x/net/http2/go16.go @@ -7,7 +7,6 @@ package http2 import ( - "crypto/tls" "net/http" "time" ) @@ -15,29 +14,3 @@ import ( func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { return t1.ExpectContinueTimeout } - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. 
-func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go index 633202c39f..73cc2381f3 100644 --- a/vendor/golang.org/x/net/http2/go18.go +++ b/vendor/golang.org/x/net/http2/go18.go @@ -12,7 +12,11 @@ import ( "net/http" ) -func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() } +func cloneTLSConfig(c *tls.Config) *tls.Config { + c2 := c.Clone() + c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 + return c2 +} var _ http.Pusher = (*responseWriter)(nil) diff --git a/vendor/golang.org/x/net/http2/go19.go b/vendor/golang.org/x/net/http2/go19.go new file mode 100644 index 0000000000..38124ba56e --- /dev/null +++ b/vendor/golang.org/x/net/http2/go19.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + s.RegisterOnShutdown(conf.state.startGracefulShutdown) + return nil +} diff --git a/vendor/golang.org/x/net/http2/h2demo/h2demo.go b/vendor/golang.org/x/net/http2/h2demo/h2demo.go index 980b6d67d6..9853107b91 100644 --- a/vendor/golang.org/x/net/http2/h2demo/h2demo.go +++ b/vendor/golang.org/x/net/http2/h2demo/h2demo.go @@ -87,6 +87,7 @@ href="https://golang.org/s/http2bug">file a bug.
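The new go19.go above is a single line of glue: on Go 1.9, configureServer19 registers the http2 server's graceful-shutdown routine with http.Server.RegisterOnShutdown, so calling srv.Shutdown also drains HTTP/2 connections. A small standalone sketch of that hook, assuming Go 1.9; the handler, address, and timings are placeholders of ours:

package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080", Handler: http.NotFoundHandler()}

	// RegisterOnShutdown is the Go 1.9 hook configureServer19 uses above:
	// each registered function runs when srv.Shutdown is called, which is
	// how the http2 server learns to begin its own graceful shutdown.
	srv.RegisterOnShutdown(func() {
		log.Println("shutdown started; HTTP/2 connection draining would begin here")
	})

	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	time.Sleep(100 * time.Millisecond) // demo only: let the listener come up

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Fatal(err)
	}
}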

  • GET /reqinfo to dump the request + headers received
  • GET /clockstream streams the current time every second
  • GET /gophertiles to see a page with a bunch of images
  • +
  • GET /serverpush to see a page with server push
  • GET /file/gopher.png for a small file (does If-Modified-Since, Content-Range, etc)
  • GET /file/go.src.tar.gz for a larger file (~10 MB)
  • GET /redirect to redirect back to / (this page)
  • @@ -168,8 +169,11 @@ var ( // fileServer returns a file-serving handler that proxies URL. // It lazily fetches URL on the first access and caches its contents forever. -func fileServer(url string) http.Handler { +func fileServer(url string, latency time.Duration) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if latency > 0 { + time.Sleep(latency) + } hi, err := fsGrp.Do(url, func() (interface{}, error) { fsMu.Lock() if h, ok := fsCache[url]; ok { @@ -227,14 +231,18 @@ func clockStreamHandler(w http.ResponseWriter, r *http.Request) { func registerHandlers() { tiles := newGopherTilesHandler() + push := newPushHandler() mux2 := http.NewServeMux() http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if r.TLS == nil { - if r.URL.Path == "/gophertiles" { - tiles.ServeHTTP(w, r) - return - } + switch { + case r.URL.Path == "/gophertiles": + tiles.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x + return + case strings.HasPrefix(r.URL.Path, "/serverpush"): + push.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x + return + case r.TLS == nil: // do not allow HTTP/1.x for anything else http.Redirect(w, r, "https://"+httpsHost()+"/", http.StatusFound) return } @@ -249,8 +257,8 @@ func registerHandlers() { mux2.ServeHTTP(w, r) }) mux2.HandleFunc("/", home) - mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png")) - mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz")) + mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png", 0)) + mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz", 0)) mux2.HandleFunc("/reqinfo", reqInfoHandler) mux2.HandleFunc("/crc32", crcHandler) mux2.HandleFunc("/ECHO", echoCapitalHandler) @@ -267,6 +275,46 @@ func registerHandlers() { }) } +var pushResources = map[string]http.Handler{ + "/serverpush/static/jquery.min.js": fileServer("https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js", 100*time.Millisecond), + "/serverpush/static/godocs.js": fileServer("https://golang.org/lib/godoc/godocs.js", 100*time.Millisecond), + "/serverpush/static/playground.js": fileServer("https://golang.org/lib/godoc/playground.js", 100*time.Millisecond), + "/serverpush/static/style.css": fileServer("https://golang.org/lib/godoc/style.css", 100*time.Millisecond), +} + +func newPushHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for path, handler := range pushResources { + if r.URL.Path == path { + handler.ServeHTTP(w, r) + return + } + } + + cacheBust := time.Now().UnixNano() + if pusher, ok := w.(http.Pusher); ok { + for path := range pushResources { + url := fmt.Sprintf("%s?%d", path, cacheBust) + if err := pusher.Push(url, nil); err != nil { + log.Printf("Failed to push %v: %v", path, err) + } + } + } + time.Sleep(100 * time.Millisecond) // fake network latency + parsing time + if err := pushTmpl.Execute(w, struct { + CacheBust int64 + HTTPSHost string + HTTPHost string + }{ + CacheBust: cacheBust, + HTTPSHost: httpsHost(), + HTTPHost: httpHost(), + }); err != nil { + log.Printf("Executing server push template: %v", err) + } + }) +} + func newGopherTilesHandler() http.Handler { const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg" res, err := http.Get(gopherURL) @@ -393,7 +441,11 @@ func serveProdTLS() error { GetCertificate: m.GetCertificate, }, } - http2.ConfigureServer(srv, 
&http2.Server{}) + http2.ConfigureServer(srv, &http2.Server{ + NewWriteScheduler: func() http2.WriteScheduler { + return http2.NewPriorityWriteScheduler(nil) + }, + }) ln, err := net.Listen("tcp", ":443") if err != nil { return err diff --git a/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/vendor/golang.org/x/net/http2/h2demo/tmpl.go new file mode 100644 index 0000000000..504d6a78a2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/tmpl.go @@ -0,0 +1,1991 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build h2demo + +package main + +import "html/template" + +var pushTmpl = template.Must(template.New("serverpush").Parse(` + + + + + + + + + HTTP/2 Server Push Demo + + + + + + + + + +
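newPushHandler above follows the standard net/http pattern for server push: assert the ResponseWriter to http.Pusher, push the page's sub-resources, then render the page. A stripped-down sketch of that pattern outside the demo; the paths, certificate filenames, and inline HTML are placeholders of ours:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// handleIndex pushes the stylesheet before writing the page body, falling
// back silently when the connection does not support server push
// (HTTP/1.x, or push disabled).
func handleIndex(w http.ResponseWriter, r *http.Request) {
	if pusher, ok := w.(http.Pusher); ok {
		if err := pusher.Push("/static/style.css", nil); err != nil {
			log.Printf("push /static/style.css: %v", err)
		}
	}
	fmt.Fprintln(w, `<html><head><link rel="stylesheet" href="/static/style.css"></head><body>hi</body></html>`)
}

func main() {
	http.HandleFunc("/", handleIndex)
	http.HandleFunc("/static/style.css", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/css")
		fmt.Fprint(w, "body{font-family:sans-serif}")
	})
	// Server push requires HTTP/2, which net/http only negotiates over TLS;
	// cert.pem and key.pem stand in for a real certificate pair.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}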
    +Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to golang.org/cmd/go. +
    + + + +
    +... +
    + + + + +
    +
    +
    +
    + Run + Format + + + +
    +
    + + +
    +
    + + +

    Command go

    + + + + + + + + + + + + + + +

    +Go is a tool for managing Go source code. +

    +

    +Usage: +

    +
    go command [arguments]
    +
    +

    +The commands are: +

    +
    build       compile packages and dependencies
    +clean       remove object files
    +doc         show documentation for package or symbol
    +env         print Go environment information
    +bug         start a bug report
    +fix         run go tool fix on packages
    +fmt         run gofmt on package sources
    +generate    generate Go files by processing source
    +get         download and install packages and dependencies
    +install     compile and install packages and dependencies
    +list        list packages
    +run         compile and run Go program
    +test        test packages
    +tool        run specified go tool
    +version     print Go version
    +vet         run go tool vet on packages
    +
    +

    +Use "go help [command]" for more information about a command. +

    +

    +Additional help topics: +

    +
    c           calling between Go and C
    +buildmode   description of build modes
    +filetype    file types
    +gopath      GOPATH environment variable
    +environment environment variables
    +importpath  import path syntax
    +packages    description of package lists
    +testflag    description of testing flags
    +testfunc    description of testing functions
    +
    +

    +Use "go help [topic]" for more information about that topic. +

    +

    Compile packages and dependencies

    +

    +Usage: +

    +
    go build [-o output] [-i] [build flags] [packages]
    +
    +

    +Build compiles the packages named by the import paths, +along with their dependencies, but it does not install the results. +

    +

    +If the arguments to build are a list of .go files, build treats +them as a list of source files specifying a single package. +

    +

    +When compiling a single main package, build writes +the resulting executable to an output file named after +the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') +or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). +The '.exe' suffix is added when writing a Windows executable. +

    +

    +When compiling multiple packages or a single non-main package, +build compiles the packages but discards the resulting object, +serving only as a check that the packages can be built. +

    +

    +When compiling packages, build ignores files that end in '_test.go'. +

    +

    +The -o flag, only allowed when compiling a single package, +forces build to write the resulting executable or object +to the named output file, instead of the default behavior described +in the last two paragraphs. +

    +

    +The -i flag installs the packages that are dependencies of the target. +

    +

    +The build flags are shared by the build, clean, get, install, list, run, +and test commands: +

    +
    -a
    +	force rebuilding of packages that are already up-to-date.
    +-n
    +	print the commands but do not run them.
    +-p n
    +	the number of programs, such as build commands or
    +	test binaries, that can be run in parallel.
    +	The default is the number of CPUs available.
    +-race
    +	enable data race detection.
    +	Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
    +-msan
    +	enable interoperation with memory sanitizer.
    +	Supported only on linux/amd64,
    +	and only with Clang/LLVM as the host C compiler.
    +-v
    +	print the names of packages as they are compiled.
    +-work
    +	print the name of the temporary work directory and
    +	do not delete it when exiting.
    +-x
    +	print the commands.
    +
    +-asmflags 'flag list'
    +	arguments to pass on each go tool asm invocation.
    +-buildmode mode
    +	build mode to use. See 'go help buildmode' for more.
    +-compiler name
    +	name of compiler to use, as in runtime.Compiler (gccgo or gc).
    +-gccgoflags 'arg list'
    +	arguments to pass on each gccgo compiler/linker invocation.
    +-gcflags 'arg list'
    +	arguments to pass on each go tool compile invocation.
    +-installsuffix suffix
    +	a suffix to use in the name of the package installation directory,
    +	in order to keep output separate from default builds.
    +	If using the -race flag, the install suffix is automatically set to race
    +	or, if set explicitly, has _race appended to it.  Likewise for the -msan
    +	flag.  Using a -buildmode option that requires non-default compile flags
    +	has a similar effect.
    +-ldflags 'flag list'
    +	arguments to pass on each go tool link invocation.
    +-linkshared
    +	link against shared libraries previously created with
    +	-buildmode=shared.
    +-pkgdir dir
    +	install and load all packages from dir instead of the usual locations.
    +	For example, when building with a non-standard configuration,
    +	use -pkgdir to keep generated packages in a separate location.
    +-tags 'tag list'
    +	a list of build tags to consider satisfied during the build.
    +	For more information about build tags, see the description of
    +	build constraints in the documentation for the go/build package.
    +-toolexec 'cmd args'
    +	a program to use to invoke toolchain programs like vet and asm.
    +	For example, instead of running asm, the go command will run
    +	'cmd args /path/to/asm <arguments for asm>'.
    +
    +

    +The list flags accept a space-separated list of strings. To embed spaces +in an element in the list, surround it with either single or double quotes. +

    +

    +For more about specifying packages, see 'go help packages'. +For more about where packages and binaries are installed, +run 'go help gopath'. +For more about calling between Go and C/C++, run 'go help c'. +

    +

    +Note: Build adheres to certain conventions such as those described +by 'go help gopath'. Not all projects can follow these conventions, +however. Installations that have their own conventions or that use +a separate software build system may choose to use lower-level +invocations such as 'go tool compile' and 'go tool link' to avoid +some of the overheads and design decisions of the build tool. +

    +

    +See also: go install, go get, go clean. +

    +

    Remove object files

    +

    +Usage: +

    +
    go clean [-i] [-r] [-n] [-x] [build flags] [packages]
    +
    +

    +Clean removes object files from package source directories. +The go command builds most objects in a temporary directory, +so go clean is mainly concerned with object files left by other +tools or by manual invocations of go build. +

    +

    +Specifically, clean removes the following files from each of the +source directories corresponding to the import paths: +

    +
    _obj/            old object directory, left from Makefiles
    +_test/           old test directory, left from Makefiles
    +_testmain.go     old gotest file, left from Makefiles
    +test.out         old test log, left from Makefiles
    +build.out        old test log, left from Makefiles
    +*.[568ao]        object files, left from Makefiles
    +
    +DIR(.exe)        from go build
    +DIR.test(.exe)   from go test -c
    +MAINFILE(.exe)   from go build MAINFILE.go
    +*.so             from SWIG
    +
    +

    +In the list, DIR represents the final path element of the +directory, and MAINFILE is the base name of any Go source +file in the directory that is not included when building +the package. +

    +

    +The -i flag causes clean to remove the corresponding installed +archive or binary (what 'go install' would create). +

    +

    +The -n flag causes clean to print the remove commands it would execute, +but not run them. +

    +

    +The -r flag causes clean to be applied recursively to all the +dependencies of the packages named by the import paths. +

    +

    +The -x flag causes clean to print remove commands as it executes them. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Show documentation for package or symbol

    +

    +Usage: +

    +
    go doc [-u] [-c] [package|[package.]symbol[.method]]
    +
    +

    +Doc prints the documentation comments associated with the item identified by its +arguments (a package, const, func, type, var, or method) followed by a one-line +summary of each of the first-level items "under" that item (package-level +declarations for a package, methods for a type, etc.). +

    +

    +Doc accepts zero, one, or two arguments. +

    +

    +Given no arguments, that is, when run as +

    +
    go doc
    +
    +

    +it prints the package documentation for the package in the current directory. +If the package is a command (package main), the exported symbols of the package +are elided from the presentation unless the -cmd flag is provided. +

    +

    +When run with one argument, the argument is treated as a Go-syntax-like +representation of the item to be documented. What the argument selects depends +on what is installed in GOROOT and GOPATH, as well as the form of the argument, +which is schematically one of these: +

    +
    go doc <pkg>
    +go doc <sym>[.<method>]
    +go doc [<pkg>.]<sym>[.<method>]
    +go doc [<pkg>.][<sym>.]<method>
    +
    +

    +The first item in this list matched by the argument is the one whose documentation +is printed. (See the examples below.) However, if the argument starts with a capital +letter it is assumed to identify a symbol or method in the current directory. +

    +

    +For packages, the order of scanning is determined lexically in breadth-first order. +That is, the package presented is the one that matches the search and is nearest +the root and lexically first at its level of the hierarchy. The GOROOT tree is +always scanned in its entirety before GOPATH. +

    +

    +If there is no package specified or matched, the package in the current +directory is selected, so "go doc Foo" shows the documentation for symbol Foo in +the current package. +

    +

    +The package path must be either a qualified path or a proper suffix of a +path. The go tool's usual package mechanism does not apply: package path +elements like . and ... are not implemented by go doc. +

    +

    +When run with two arguments, the first must be a full package path (not just a +suffix), and the second is a symbol or symbol and method; this is similar to the +syntax accepted by godoc: +

    +
    go doc <pkg> <sym>[.<method>]
    +
    +

    +In all forms, when matching symbols, lower-case letters in the argument match +either case but upper-case letters match exactly. This means that there may be +multiple matches of a lower-case argument in a package if different symbols have +different cases. If this occurs, documentation for all matches is printed. +

    +

    +Examples: +

    +
    go doc
    +	Show documentation for current package.
    +go doc Foo
    +	Show documentation for Foo in the current package.
    +	(Foo starts with a capital letter so it cannot match
    +	a package path.)
    +go doc encoding/json
    +	Show documentation for the encoding/json package.
    +go doc json
    +	Shorthand for encoding/json.
    +go doc json.Number (or go doc json.number)
    +	Show documentation and method summary for json.Number.
    +go doc json.Number.Int64 (or go doc json.number.int64)
    +	Show documentation for json.Number's Int64 method.
    +go doc cmd/doc
    +	Show package docs for the doc command.
    +go doc -cmd cmd/doc
    +	Show package docs and exported symbols within the doc command.
    +go doc template.new
    +	Show documentation for html/template's New function.
    +	(html/template is lexically before text/template)
    +go doc text/template.new # One argument
    +	Show documentation for text/template's New function.
    +go doc text/template new # Two arguments
    +	Show documentation for text/template's New function.
    +
    +At least in the current tree, these invocations all print the
    +documentation for json.Decoder's Decode method:
    +
    +go doc json.Decoder.Decode
    +go doc json.decoder.decode
    +go doc json.decode
    +cd go/src/encoding/json; go doc decode
    +
    +

    +Flags: +

    +
    -c
    +	Respect case when matching symbols.
    +-cmd
    +	Treat a command (package main) like a regular package.
    +	Otherwise package main's exported symbols are hidden
    +	when showing the package's top-level documentation.
    +-u
    +	Show documentation for unexported as well as exported
    +	symbols and methods.
    +
    +

    Print Go environment information

    +

    +Usage: +

    +
    go env [var ...]
    +
    +

    +Env prints Go environment information. +

    +

    +By default env prints information as a shell script +(on Windows, a batch file). If one or more variable +names is given as arguments, env prints the value of +each named variable on its own line. +

    +

    Start a bug report

    +

    +Usage: +

    +
    go bug
    +
    +

    +Bug opens the default browser and starts a new bug report. +The report includes useful system information. +

    +

    Run go tool fix on packages

    +

    +Usage: +

    +
    go fix [packages]
    +
    +

    +Fix runs the Go fix command on the packages named by the import paths. +

    +

    +For more about fix, see 'go doc cmd/fix'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run fix with specific options, run 'go tool fix'. +

    +

    +See also: go fmt, go vet. +

    +

    Run gofmt on package sources

    +

    +Usage: +

    +
    go fmt [-n] [-x] [packages]
    +
    +

    +Fmt runs the command 'gofmt -l -w' on the packages named +by the import paths. It prints the names of the files that are modified. +

    +

    +For more about gofmt, see 'go doc cmd/gofmt'. +For more about specifying packages, see 'go help packages'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +To run gofmt with specific options, run gofmt itself. +

    +

    +See also: go fix, go vet. +

    +

    Generate Go files by processing source

    +

    +Usage: +

    +
    go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
    +
    +

    +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files. +

    +

    +Go generate is never run automatically by go build, go get, go test, +and so on. It must be run explicitly. +

    +

    +Go generate scans the file for directives, which are lines of +the form, +

    +
    //go:generate command argument...
    +
    +

    +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. +
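For illustration, assuming the stringer generator (golang.org/x/tools/cmd/stringer) is installed in the shell path and the file declares a type named Pill, a directive could read:

	//go:generate stringer -type=Pill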

    +

    +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. +

    +

    +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. +

    +

    +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. +

    +

    +Go generate sets several variables when it runs the generator: +

    +
    $GOARCH
    +	The execution architecture (arm, amd64, etc.)
    +$GOOS
    +	The execution operating system (linux, windows, etc.)
    +$GOFILE
    +	The base name of the file.
    +$GOLINE
    +	The line number of the directive in the source file.
    +$GOPACKAGE
    +	The name of the package of the file containing the directive.
    +$DOLLAR
    +	A dollar sign.
    +
    +

    +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. +

    +

    +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. +

    +

    +A directive of the form, +

    +
    //go:generate -command xxx args...
    +
    +

    +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, +

    +
    //go:generate -command foo go tool foo
    +
    +

    +specifies that the command "foo" represents the generator +"go tool foo". +

    +

    +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files, they are treated +as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. +

    +

    +If any generator returns an error exit status, "go generate" skips +all further processing for that package. +

    +

    +The generator is run in the package's source directory. +

    +

    +Go generate accepts one specific flag: +

    +
    -run=""
    +	if non-empty, specifies a regular expression to select
    +	directives whose full original source text (excluding
    +	any trailing spaces and final newline) matches the
    +	expression.
    +
    +

    +It also accepts the standard build flags including -v, -n, and -x. +The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Download and install packages and dependencies

    +

    +Usage: +

    +
    go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
    +
    +

    +Get downloads the packages named by the import paths, along with their +dependencies. It then installs the named packages, like 'go install'. +

    +

    +The -d flag instructs get to stop after downloading the packages; that is, +it instructs get not to install the packages. +

    +

    +The -f flag, valid only when -u is set, forces get -u not to verify that +each package has been checked out from the source control repository +implied by its import path. This can be useful if the source is a local fork +of the original. +

    +

    +The -fix flag instructs get to run the fix tool on the downloaded packages +before resolving dependencies or building the code. +

    +

    +The -insecure flag permits fetching from repositories and resolving +custom domains using insecure schemes such as HTTP. Use with caution. +

    +

    +The -t flag instructs get to also download the packages required to build +the tests for the specified packages. +

    +

    +The -u flag instructs get to use the network to update the named packages +and their dependencies. By default, get uses the network to check out +missing packages but does not use it to look for updates to existing packages. +

    +

    +The -v flag enables verbose progress and debug output. +

    +

    +Get also accepts build flags to control the installation. See 'go help build'. +

    +

    +When checking out a new package, get creates the target directory +GOPATH/src/<import-path>. If the GOPATH contains multiple entries, +get uses the first one. For more details see: 'go help gopath'. +

    +

    +When checking out or updating a package, get looks for a branch or tag +that matches the locally installed version of Go. The most important +rule is that if the local installation is running version "go1", get +searches for a branch or tag named "go1". If no such version exists it +retrieves the most recent version of the package. +

    +

    +When go get checks out or updates a Git repository, +it also updates any git submodules referenced by the repository. +

    +

    +Get never checks out or updates code stored in vendor directories. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    +For more about how 'go get' finds source code to +download, see 'go help importpath'. +

    +

    +See also: go build, go install, go clean. +

    +

    Compile and install packages and dependencies

    +

    +Usage: +

    +
    go install [build flags] [packages]
    +
    +

    +Install compiles and installs the packages named by the import paths, +along with their dependencies. +

    +

    +For more about the build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go get, go clean. +

    +

    List packages

    +

    +Usage: +

    +
    go list [-e] [-f format] [-json] [build flags] [packages]
    +
    +

    +List lists the packages named by the import paths, one per line. +

    +

    +The default output shows the package import path: +

    +
    bytes
    +encoding/json
    +github.com/gorilla/mux
    +golang.org/x/net/html
    +
    +

    +The -f flag specifies an alternate format for the list, using the +syntax of package template. The default output is equivalent to -f +'{{.ImportPath}}'. The struct being passed to the template is: +

    +
    type Package struct {
    +    Dir           string // directory containing package sources
    +    ImportPath    string // import path of package in dir
    +    ImportComment string // path in import comment on package statement
    +    Name          string // package name
    +    Doc           string // package documentation string
    +    Target        string // install path
    +    Shlib         string // the shared library that contains this package (only set when -linkshared)
    +    Goroot        bool   // is this package in the Go root?
    +    Standard      bool   // is this package part of the standard Go library?
    +    Stale         bool   // would 'go install' do anything for this package?
    +    StaleReason   string // explanation for Stale==true
    +    Root          string // Go root or Go path dir containing this package
    +    ConflictDir   string // this directory shadows Dir in $GOPATH
    +    BinaryOnly    bool   // binary-only package: cannot be recompiled from sources
    +
    +    // Source files
    +    GoFiles        []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
    +    CgoFiles       []string // .go source files that import "C"
    +    IgnoredGoFiles []string // .go sources ignored due to build constraints
    +    CFiles         []string // .c source files
    +    CXXFiles       []string // .cc, .cxx and .cpp source files
    +    MFiles         []string // .m source files
    +    HFiles         []string // .h, .hh, .hpp and .hxx source files
    +    FFiles         []string // .f, .F, .for and .f90 Fortran source files
    +    SFiles         []string // .s source files
    +    SwigFiles      []string // .swig files
    +    SwigCXXFiles   []string // .swigcxx files
    +    SysoFiles      []string // .syso object files to add to archive
    +    TestGoFiles    []string // _test.go files in package
    +    XTestGoFiles   []string // _test.go files outside package
    +
    +    // Cgo directives
    +    CgoCFLAGS    []string // cgo: flags for C compiler
    +    CgoCPPFLAGS  []string // cgo: flags for C preprocessor
    +    CgoCXXFLAGS  []string // cgo: flags for C++ compiler
    +    CgoFFLAGS    []string // cgo: flags for Fortran compiler
    +    CgoLDFLAGS   []string // cgo: flags for linker
    +    CgoPkgConfig []string // cgo: pkg-config names
    +
    +    // Dependency information
    +    Imports      []string // import paths used by this package
    +    Deps         []string // all (recursively) imported dependencies
    +    TestImports  []string // imports from TestGoFiles
    +    XTestImports []string // imports from XTestGoFiles
    +
    +    // Error information
    +    Incomplete bool            // this package or a dependency has an error
    +    Error      *PackageError   // error loading package
    +    DepsErrors []*PackageError // errors loading dependencies
    +}
    +
    +

    +Packages stored in vendor directories report an ImportPath that includes the +path to the vendor directory (for example, "d/vendor/p" instead of "p"), +so that the ImportPath uniquely identifies a given copy of a package. +The Imports, Deps, TestImports, and XTestImports lists also contain these +expanded imports paths. See golang.org/s/go15vendor for more about vendoring. +

    +

    +The error information, if any, is +

    +
    type PackageError struct {
    +    ImportStack   []string // shortest path from package named on command line to this one
    +    Pos           string   // position of error (if present, file:line:col)
    +    Err           string   // the error itself
    +}
    +
    +

    +The template function "join" calls strings.Join. +
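For illustration, the following prints a listed package followed by its (recursive) dependencies joined with commas; any standard package such as fmt works here:

	go list -f '{{.ImportPath}}: {{join .Deps ", "}}' fmt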

    +

    +The template function "context" returns the build context, defined as: +

    +
    type Context struct {
    +	GOARCH        string   // target architecture
    +	GOOS          string   // target operating system
    +	GOROOT        string   // Go root
    +	GOPATH        string   // Go path
    +	CgoEnabled    bool     // whether cgo can be used
    +	UseAllFiles   bool     // use files regardless of +build lines, file names
    +	Compiler      string   // compiler to assume when computing target paths
    +	BuildTags     []string // build constraints to match in +build lines
    +	ReleaseTags   []string // releases the current release is compatible with
    +	InstallSuffix string   // suffix to use in the name of the install dir
    +}
    +
    +

    +For more information about the meaning of these fields see the documentation +for the go/build package's Context type. +

    +

    +The -json flag causes the package data to be printed in JSON format +instead of using the template format. +

    +

    +The -e flag changes the handling of erroneous packages, those that +cannot be found or are malformed. By default, the list command +prints an error to standard error for each erroneous package and +omits the packages from consideration during the usual printing. +With the -e flag, the list command never prints errors to standard +error and instead processes the erroneous packages with the usual +printing. Erroneous packages will have a non-empty ImportPath and +a non-nil Error field; other information may or may not be missing +(zeroed). +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Compile and run Go program

    +

    +Usage: +

    +
    go run [build flags] [-exec xprog] gofiles... [arguments...]
    +
    +

    +Run compiles and runs the main package comprising the named Go source files. +A Go source file is defined to be a file ending in a literal ".go" suffix. +

    +

    +By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. +If the -exec flag is given, 'go run' invokes the binary using xprog: +

    +
    'xprog a.out arguments...'.
    +
    +

    +If the -exec flag is not given, GOOS or GOARCH is different from the system +default, and a program named go_$GOOS_$GOARCH_exec can be found +on the current search path, 'go run' invokes the binary using that program, +for example 'go_nacl_386_exec a.out arguments...'. This allows execution of +cross-compiled programs when a simulator or other execution method is +available. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go build. +

    +

    Test packages

    +

    +Usage: +

    +
    go test [build/test flags] [packages] [build/test flags & test binary flags]
    +
    +

    +'Go test' automates testing the packages named by the import paths. +It prints a summary of the test results in the format: +

    +
    ok   archive/tar   0.011s
    +FAIL archive/zip   0.022s
    +ok   compress/gzip 0.033s
    +...
    +
    +

    +followed by detailed output for each failed package. +

    +

    +'Go test' recompiles each package along with any files with names matching +the file pattern "*_test.go". +Files whose names begin with "_" (including "_test.go") or "." are ignored. +These additional files can contain test functions, benchmark functions, and +example functions. See 'go help testfunc' for more. +Each listed package causes the execution of a separate test binary. +

    +

    +Test files that declare a package with the suffix "_test" will be compiled as a +separate package, and then linked and run with the main test binary. +

    +

    +The go tool will ignore a directory named "testdata", making it available +to hold ancillary data needed by the tests. +

    +

    +By default, go test needs no arguments. It compiles and tests the package +with source in the current directory, including tests, and runs the tests. +

    +

    +The package is built in a temporary directory so it does not interfere with the +non-test installation. +

    +

    +In addition to the build flags, the flags handled by 'go test' itself are: +

    +
    -args
    +    Pass the remainder of the command line (everything after -args)
    +    to the test binary, uninterpreted and unchanged.
    +    Because this flag consumes the remainder of the command line,
    +    the package list (if present) must appear before this flag.
    +
    +-c
    +    Compile the test binary to pkg.test but do not run it
    +    (where pkg is the last element of the package's import path).
    +    The file name can be changed with the -o flag.
    +
    +-exec xprog
    +    Run the test binary using xprog. The behavior is the same as
    +    in 'go run'. See 'go help run' for details.
    +
    +-i
    +    Install packages that are dependencies of the test.
    +    Do not run the test.
    +
    +-o file
    +    Compile the test binary to the named file.
    +    The test still runs (unless -c or -i is specified).
    +
    +
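For illustration (the output name app.test is hypothetical), this compiles the tests for the package in the current directory into a named binary without running them:

	go test -c -o app.test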

    +The test binary also accepts flags that control execution of the test; these +flags are also accessible by 'go test'. See 'go help testflag' for details. +

    +

    +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go vet. +

    +

    Run specified go tool

    +

    +Usage: +

    +
    go tool [-n] command [args...]
    +
    +

    +Tool runs the go tool command identified by the arguments. +With no arguments it prints the list of known tools. +

    +

    +The -n flag causes tool to print the command that would be +executed but not execute it. +

    +

    +For more about each tool command, see 'go tool command -h'. +

    +

    Print Go version

    +

    +Usage: +

    +
    go version
    +
    +

    +Version prints the Go version, as reported by runtime.Version. +

    +

    Run go tool vet on packages

    +

    +Usage: +

    +
    go vet [-n] [-x] [build flags] [packages]
    +
    +

    +Vet runs the Go vet command on the packages named by the import paths. +

    +

    +For more about vet, see 'go doc cmd/vet'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run the vet tool with specific options, run 'go tool vet'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go fmt, go fix. +

    +

    Calling between Go and C

    +

    +There are two different ways to call between Go and C/C++ code. +

    +

    +The first is the cgo tool, which is part of the Go distribution. For +information on how to use it see the cgo documentation (go doc cmd/cgo). +

    +

    +The second is the SWIG program, which is a general tool for +interfacing between languages. For information on SWIG see +http://swig.org/. When running go build, any file with a .swig +extension will be passed to SWIG. Any file with a .swigcxx extension +will be passed to SWIG with the -c++ option. +

    +

    +When either cgo or SWIG is used, go build will pass any .c, .m, .s, +or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++ +compiler. The CC or CXX environment variables may be set to determine +the C or C++ compiler, respectively, to use. +

    +

    Description of build modes

    +

    +The 'go build' and 'go install' commands take a -buildmode argument which +indicates which kind of object file is to be built. Currently supported values +are: +

    +
    -buildmode=archive
    +	Build the listed non-main packages into .a files. Packages named
    +	main are ignored.
    +
    +-buildmode=c-archive
    +	Build the listed main package, plus all packages it imports,
    +	into a C archive file. The only callable symbols will be those
    +	functions exported using a cgo //export comment. Requires
    +	exactly one main package to be listed.
    +
    +-buildmode=c-shared
    +	Build the listed main packages, plus all packages that they
    +	import, into C shared libraries. The only callable symbols will
    +	be those functions exported using a cgo //export comment.
    +	Non-main packages are ignored.
    +
    +-buildmode=default
    +	Listed main packages are built into executables and listed
    +	non-main packages are built into .a files (the default
    +	behavior).
    +
    +-buildmode=shared
    +	Combine all the listed non-main packages into a single shared
    +	library that will be used when building with the -linkshared
    +	option. Packages named main are ignored.
    +
    +-buildmode=exe
    +	Build the listed main packages and everything they import into
    +	executables. Packages not named main are ignored.
    +
    +-buildmode=pie
    +	Build the listed main packages and everything they import into
    +	position independent executables (PIE). Packages not named
    +	main are ignored.
    +
    +-buildmode=plugin
    +	Build the listed main packages, plus all packages that they
    +	import, into a Go plugin. Packages not named main are ignored.
    +
    +
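For illustration, assuming a hypothetical main package in ./greet whose functions carry cgo //export comments, and a platform that supports the mode, a C shared library could be built with:

	go build -buildmode=c-shared -o libgreet.so ./greet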

    File types

    +

    +The go command examines the contents of a restricted set of files +in each directory. It identifies which files to examine based on +the extension of the file name. These extensions are: +

    +
    .go
    +	Go source files.
    +.c, .h
    +	C source files.
    +	If the package uses cgo or SWIG, these will be compiled with the
    +	OS-native compiler (typically gcc); otherwise they will
    +	trigger an error.
    +.cc, .cpp, .cxx, .hh, .hpp, .hxx
    +	C++ source files. Only useful with cgo or SWIG, and always
    +	compiled with the OS-native compiler.
    +.m
    +	Objective-C source files. Only useful with cgo, and always
    +	compiled with the OS-native compiler.
    +.s, .S
    +	Assembler source files.
    +	If the package uses cgo or SWIG, these will be assembled with the
    +	OS-native assembler (typically gcc (sic)); otherwise they
    +	will be assembled with the Go assembler.
    +.swig, .swigcxx
    +	SWIG definition files.
    +.syso
    +	System object files.
    +
    +

    +Files of each of these types except .syso may contain build +constraints, but the go command stops scanning for build constraints +at the first item in the file that is not a blank line or //-style +line comment. See the go/build package documentation for +more details. +

    +

    +Non-test Go source files can also include a //go:binary-only-package +comment, indicating that the package sources are included +for documentation only and must not be used to build the +package binary. This enables distribution of Go packages in +their compiled form alone. See the go/build package documentation +for more details. +

    +

    GOPATH environment variable

    +

    +The Go path is used to resolve import statements. +It is implemented by and documented in the go/build package. +

    +

    +The GOPATH environment variable lists places to look for Go code. +On Unix, the value is a colon-separated string. +On Windows, the value is a semicolon-separated string. +On Plan 9, the value is a list. +

    +

    +If the environment variable is unset, GOPATH defaults +to a subdirectory named "go" in the user's home directory +($HOME/go on Unix, %USERPROFILE%\go on Windows), +unless that directory holds a Go distribution. +Run "go env GOPATH" to see the current GOPATH. +

    +

    +See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. +

    +

    +Each directory listed in GOPATH must have a prescribed structure: +

    +

    +The src directory holds source code. The path below src +determines the import path or executable name. +

    +

    +The pkg directory holds installed package objects. +As in the Go tree, each target operating system and +architecture pair has its own subdirectory of pkg +(pkg/GOOS_GOARCH). +

    +

    +If DIR is a directory listed in the GOPATH, a package with +source in DIR/src/foo/bar can be imported as "foo/bar" and +has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". +

    +

    +The bin directory holds compiled commands. +Each command is named for its source directory, but only +the final element, not the entire path. That is, the +command with source in DIR/src/foo/quux is installed into +DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped +so that you can add DIR/bin to your PATH to get at the +installed commands. If the GOBIN environment variable is +set, commands are installed to the directory it names instead +of DIR/bin. GOBIN must be an absolute path. +

    +

    +Here's an example directory layout: +

    +
    GOPATH=/home/user/go
    +
    +/home/user/go/
    +    src/
    +        foo/
    +            bar/               (go code in package bar)
    +                x.go
    +            quux/              (go code in package main)
    +                y.go
    +    bin/
    +        quux                   (installed command)
    +    pkg/
    +        linux_amd64/
    +            foo/
    +                bar.a          (installed package object)
    +
    +

    +Go searches each directory listed in GOPATH to find source code, +but new packages are always downloaded into the first directory +in the list. +

    +

    +See https://golang.org/doc/code.html for an example. +

    +

    Internal Directories

    +

    +Code in or below a directory named "internal" is importable only +by code in the directory tree rooted at the parent of "internal". +Here's an extended version of the directory layout above: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            internal/
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The code in z.go is imported as "foo/internal/baz", but that +import statement can only appear in source files in the subtree +rooted at foo. The source files foo/f.go, foo/bar/x.go, and +foo/quux/y.go can all import "foo/internal/baz", but the source file +crash/bang/b.go cannot. +

    +

    +See https://golang.org/s/go14internal for details. +

    +

    Vendor Directories

    +

    +Go 1.6 includes support for using local copies of external dependencies +to satisfy imports of those dependencies, often referred to as vendoring. +

    +

    +Code below a directory named "vendor" is importable only +by code in the directory tree rooted at the parent of "vendor", +and only using an import path that omits the prefix up to and +including the vendor element. +

    +

    +Here's the example from the previous section, +but with the "internal" directory renamed to "vendor" +and a new foo/vendor/crash/bang directory added: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            vendor/
    +                crash/
    +                    bang/      (go code in package bang)
    +                        b.go
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The same visibility rules apply as for internal, but the code +in z.go is imported as "baz", not as "foo/vendor/baz". +

    +

    +Code in vendor directories deeper in the source tree shadows +code in higher directories. Within the subtree rooted at foo, an import +of "crash/bang" resolves to "foo/vendor/crash/bang", not the +top-level "crash/bang". +

    +

    +Code in vendor directories is not subject to import path +checking (see 'go help importpath'). +

    +

    +When 'go get' checks out or updates a git repository, it now also +updates submodules. +

    +

    +Vendor directories do not affect the placement of new repositories +being checked out for the first time by 'go get': those are always +placed in the main GOPATH, never in a vendor subtree. +

    +

    +See https://golang.org/s/go15vendor for details. +

    +

    Environment variables

    +

    +The go command, and the tools it invokes, examine a few different +environment variables. For many of these, you can see the default +value on your system by running 'go env NAME', where NAME is the +name of the variable. +

    +

    +General-purpose environment variables: +

    +
    GCCGO
    +	The gccgo command to run for 'go build -compiler=gccgo'.
    +GOARCH
    +	The architecture, or processor, for which to compile code.
    +	Examples are amd64, 386, arm, ppc64.
    +GOBIN
    +	The directory where 'go install' will install a command.
    +GOOS
    +	The operating system for which to compile code.
    +	Examples are linux, darwin, windows, netbsd.
    +GOPATH
    +	For more details see: 'go help gopath'.
    +GORACE
    +	Options for the race detector.
    +	See https://golang.org/doc/articles/race_detector.html.
    +GOROOT
    +	The root of the go tree.
    +
    +

    +Environment variables for use with cgo: +

    +
    CC
    +	The command to use to compile C code.
    +CGO_ENABLED
    +	Whether the cgo command is supported.  Either 0 or 1.
    +CGO_CFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C code.
    +CGO_CPPFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C or C++ code.
    +CGO_CXXFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C++ code.
    +CGO_FFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	Fortran code.
    +CGO_LDFLAGS
    +	Flags that cgo will pass to the compiler when linking.
    +CXX
    +	The command to use to compile C++ code.
    +PKG_CONFIG
    +	Path to pkg-config tool.
    +
    +

    +Architecture-specific environment variables: +

    +
    GOARM
    +	For GOARCH=arm, the ARM architecture for which to compile.
    +	Valid values are 5, 6, 7.
    +GO386
    +	For GOARCH=386, the floating point instruction set.
    +	Valid values are 387, sse2.
    +
    +
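For illustration (the path ./cmd/agent is hypothetical), a cross build for a 32-bit ARMv7 Linux target can set these variables for a single invocation:

	GOOS=linux GOARCH=arm GOARM=7 go build ./cmd/agent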

    +Special-purpose environment variables: +

    +
    GOROOT_FINAL
    +	The root of the installed Go tree, when it is
    +	installed in a location other than where it is built.
    +	File names in stack traces are rewritten from GOROOT to
    +	GOROOT_FINAL.
    +GO_EXTLINK_ENABLED
    +	Whether the linker should use external linking mode
    +	when using -linkmode=auto with code that uses cgo.
    +	Set to 0 to disable external linking mode, 1 to enable it.
    +GIT_ALLOW_PROTOCOL
    +	Defined by Git. A colon-separated list of schemes that are allowed to be used
    +	with git fetch/clone. If set, any scheme not explicitly mentioned will be
    +	considered insecure by 'go get'.
    +
    +

    Import path syntax

    +

    +An import path (see 'go help packages') denotes a package stored in the local +file system. In general, an import path denotes either a standard package (such +as "unicode/utf8") or a package found in one of the work spaces (For more +details see: 'go help gopath'). +

    +

    Relative import paths

    +

    +An import path beginning with ./ or ../ is called a relative path. +The toolchain supports relative import paths as a shortcut in two ways. +

    +

    +First, a relative path can be used as a shorthand on the command line. +If you are working in the directory containing the code imported as +"unicode" and want to run the tests for "unicode/utf8", you can type +"go test ./utf8" instead of needing to specify the full path. +Similarly, in the reverse situation, "go test .." will test "unicode" from +the "unicode/utf8" directory. Relative patterns are also allowed, like +"go test ./..." to test all subdirectories. See 'go help packages' for details +on the pattern syntax. +

    +

    +Second, if you are compiling a Go program not in a work space, +you can use a relative path in an import statement in that program +to refer to nearby code also not in a work space. +This makes it easy to experiment with small multipackage programs +outside of the usual work spaces, but such programs cannot be +installed with "go install" (there is no work space in which to install them), +so they are rebuilt from scratch each time they are built. +To avoid ambiguity, Go programs cannot use relative import paths +within a work space. +

    +

    Remote import paths

    +

    +Certain import paths also +describe how to obtain the source code for the package using +a revision control system. +

    +

    +A few common code hosting sites have special syntax: +

    +
    Bitbucket (Git, Mercurial)
    +
    +	import "bitbucket.org/user/project"
    +	import "bitbucket.org/user/project/sub/directory"
    +
    +GitHub (Git)
    +
    +	import "github.com/user/project"
    +	import "github.com/user/project/sub/directory"
    +
    +Launchpad (Bazaar)
    +
    +	import "launchpad.net/project"
    +	import "launchpad.net/project/series"
    +	import "launchpad.net/project/series/sub/directory"
    +
    +	import "launchpad.net/~user/project/branch"
    +	import "launchpad.net/~user/project/branch/sub/directory"
    +
    +IBM DevOps Services (Git)
    +
    +	import "hub.jazz.net/git/user/project"
    +	import "hub.jazz.net/git/user/project/sub/directory"
    +
    +

    +For code hosted on other servers, import paths may either be qualified +with the version control type, or the go tool can dynamically fetch +the import path over https/http and discover where the code resides +from a <meta> tag in the HTML. +

    +

    +To declare the code location, an import path of the form +

    +
    repository.vcs/path
    +
    +

    +specifies the given repository, with or without the .vcs suffix, +using the named version control system, and then the path inside +that repository. The supported version control systems are: +

    +
    Bazaar      .bzr
    +Git         .git
    +Mercurial   .hg
    +Subversion  .svn
    +
    +

    +For example, +

    +
    import "example.org/user/foo.hg"
    +
    +

    +denotes the root directory of the Mercurial repository at +example.org/user/foo or foo.hg, and +

    +
    import "example.org/repo.git/foo/bar"
    +
    +

    +denotes the foo/bar directory of the Git repository at +example.org/repo or repo.git. +

    +

    +When a version control system supports multiple protocols, +each is tried in turn when downloading. For example, a Git +download tries https://, then git+ssh://. +

    +

    +By default, downloads are restricted to known secure protocols +(e.g. https, ssh). To override this setting for Git downloads, the +GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: +'go help environment'). +

    +

    +If the import path is not a known code hosting site and also lacks a +version control qualifier, the go tool attempts to fetch the import +over https/http and looks for a <meta> tag in the document's HTML +<head>. +

    +

    +The meta tag has the form: +

    +
    <meta name="go-import" content="import-prefix vcs repo-root">
    +
    +

    +The import-prefix is the import path corresponding to the repository +root. It must be a prefix or an exact match of the package being +fetched with "go get". If it's not an exact match, another http +request is made at the prefix to verify the <meta> tags match. +

    +

    +The meta tag should appear as early in the file as possible. +In particular, it should appear before any raw JavaScript or CSS, +to avoid confusing the go command's restricted parser. +

    +

    +The vcs is one of "git", "hg", "svn", etc. +

    +

    +The repo-root is the root of the version control system +containing a scheme and not containing a .vcs qualifier. +

    +

    +For example, +

    +
    import "example.org/pkg/foo"
    +
    +

    +will result in the following requests: +

    +
    https://example.org/pkg/foo?go-get=1 (preferred)
    +http://example.org/pkg/foo?go-get=1  (fallback, only with -insecure)
    +
    +

    +If that page contains the meta tag +

    +
    <meta name="go-import" content="example.org git https://code.org/r/p/exproj">
    +
    +

    +the go tool will verify that https://example.org/?go-get=1 contains the +same meta tag and then git clone https://code.org/r/p/exproj into +GOPATH/src/example.org. +

    +

    +New downloaded packages are written to the first directory listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +The go command attempts to download the version of the +package appropriate for the Go release being used. +Run 'go help get' for more. +

    +

    Import path checking

    +

    +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. +

    +

    +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: +

    +
    package math // import "path"
    +package math /* import "path" */
    +
    +

    +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. +

    +

    +Import path checking is disabled for code found within vendor trees. +This makes it possible to copy code into alternate locations in vendor trees +without needing to update import comments. +

    +

    +See https://golang.org/s/go14customimport for details. +

    +

    Description of package lists

    +

    +Many commands apply to a set of packages: +

    +
    go action [packages]
    +
    +

    +Usually, [packages] is a list of import paths. +

    +

    +An import path that is a rooted path or that begins with +a . or .. element is interpreted as a file system path and +denotes the package in that directory. +

    +

    +Otherwise, the import path P denotes the package found in +the directory DIR/src/P for some DIR listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +If no import paths are given, the action applies to the +package in the current directory. +

    +

    +There are four reserved names for paths that should not be used +for packages to be built with the go tool: +

    +

    +- "main" denotes the top-level package in a stand-alone executable. +

    +

    +- "all" expands to all package directories found in all the GOPATH +trees. For example, 'go list all' lists all the packages on the local +system. +

    +

    +- "std" is like all but expands to just the packages in the standard +Go library. +

    +

    +- "cmd" expands to the Go repository's commands and their +internal libraries. +

    +

    +Import paths beginning with "cmd/" only match source code in +the Go repository. +

    +

    +An import path is a pattern if it includes one or more "..." wildcards, +each of which can match any string, including the empty string and +strings containing slashes. Such a pattern expands to all package +directories found in the GOPATH trees with names matching the +patterns. As a special case, x/... matches x as well as x's subdirectories. +For example, net/... expands to net and packages in its subdirectories. +
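For illustration, the first command below tests the package in the current directory and every package beneath it, and the second lists net together with the packages in its subdirectories:

	go test ./...
	go list net/...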

    +

    +An import path can also name a package to be downloaded from +a remote repository. Run 'go help importpath' for details. +

    +

    +Every package in a program must have a unique import path. +By convention, this is arranged by starting each path with a +unique prefix that belongs to you. For example, paths used +internally at Google all begin with 'google', and paths +denoting remote repositories begin with the path to the code, +such as 'github.com/user/repo'. +

    +

    +Packages in a program need not have unique package names, +but there are two reserved package names with special meaning. +The name main indicates a command, not a library. +Commands are built into binaries and cannot be imported. +The name documentation indicates documentation for +a non-Go program in the directory. Files in package documentation +are ignored by the go command. +

    +

    +As a special case, if the package list is a list of .go files from a +single directory, the command is applied to a single synthesized +package made up of exactly those files, ignoring any build constraints +in those files and ignoring any other files in the directory. +

    +

    +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". +

    +

    Description of testing flags

    +

    +The 'go test' command takes both flags that apply to 'go test' itself +and flags that apply to the resulting test binary. +

    +

    +Several of the flags control profiling and write an execution profile +suitable for "go tool pprof"; run "go tool pprof -h" for more +information. The --alloc_space, --alloc_objects, and --show_bytes +options of pprof control how the information is presented. +

    +

    +The following flags are recognized by the 'go test' command and +control the execution of any test: +

    +
    -bench regexp
    +    Run (sub)benchmarks matching a regular expression.
    +    The given regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    benchmark's identifier.
    +    By default, no benchmarks run. To run all benchmarks,
    +    use '-bench .' or '-bench=.'.
    +
    +-benchtime t
    +    Run enough iterations of each benchmark to take t, specified
    +    as a time.Duration (for example, -benchtime 1h30s).
    +    The default is 1 second (1s).
    +
    +-count n
    +    Run each test and benchmark n times (default 1).
    +    If -cpu is set, run n times for each GOMAXPROCS value.
    +    Examples are always run once.
    +
    +-cover
    +    Enable coverage analysis.
    +
    +-covermode set,count,atomic
    +    Set the mode for coverage analysis for the package[s]
    +    being tested. The default is "set" unless -race is enabled,
    +    in which case it is "atomic".
    +    The values:
    +	set: bool: does this statement run?
    +	count: int: how many times does this statement run?
    +	atomic: int: count, but correct in multithreaded tests;
    +		significantly more expensive.
    +    Sets -cover.
    +
    +-coverpkg pkg1,pkg2,pkg3
    +    Apply coverage analysis in each test to the given list of packages.
    +    The default is for each test to analyze only the package being tested.
    +    Packages are specified as import paths.
    +    Sets -cover.
    +
    +-cpu 1,2,4
    +    Specify a list of GOMAXPROCS values for which the tests or
    +    benchmarks should be executed.  The default is the current value
    +    of GOMAXPROCS.
    +
    +-parallel n
    +    Allow parallel execution of test functions that call t.Parallel.
    +    The value of this flag is the maximum number of tests to run
    +    simultaneously; by default, it is set to the value of GOMAXPROCS.
    +    Note that -parallel only applies within a single test binary.
    +    The 'go test' command may run tests for different packages
    +    in parallel as well, according to the setting of the -p flag
    +    (see 'go help build').
    +
    +-run regexp
    +    Run only those tests and examples matching the regular expression.
    +    For tests the regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    test's identifier.
    +
    +-short
    +    Tell long-running tests to shorten their run time.
    +    It is off by default but set during all.bash so that installing
    +    the Go tree can run a sanity check but not spend time running
    +    exhaustive tests.
    +
    +-timeout t
    +    If a test runs longer than t, panic.
    +    The default is 10 minutes (10m).
    +
    +-v
    +    Verbose output: log all tests as they are run. Also print all
    +    text from Log and Logf calls even if the test succeeds.
    +
    +
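For illustration, a common invocation skips the tests (the anchored empty regexp matches no test names), runs every benchmark three times, and reports allocations:

	go test -run '^$' -bench . -benchmem -count 3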

    +The following flags are also recognized by 'go test' and can be used to +profile the tests during execution: +

    +
    -benchmem
    +    Print memory allocation statistics for benchmarks.
    +
    +-blockprofile block.out
    +    Write a goroutine blocking profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-blockprofilerate n
    +    Control the detail provided in goroutine blocking profiles by
    +    calling runtime.SetBlockProfileRate with n.
    +    See 'go doc runtime.SetBlockProfileRate'.
    +    The profiler aims to sample, on average, one blocking event every
    +    n nanoseconds the program spends blocked.  By default,
    +    if -test.blockprofile is set without this flag, all blocking events
    +    are recorded, equivalent to -test.blockprofilerate=1.
    +
    +-coverprofile cover.out
    +    Write a coverage profile to the file after all tests have passed.
    +    Sets -cover.
    +
    +-cpuprofile cpu.out
    +    Write a CPU profile to the specified file before exiting.
    +    Writes test binary as -c would.
    +
    +-memprofile mem.out
    +    Write a memory profile to the file after all tests have passed.
    +    Writes test binary as -c would.
    +
    +-memprofilerate n
    +    Enable more precise (and expensive) memory profiles by setting
    +    runtime.MemProfileRate.  See 'go doc runtime.MemProfileRate'.
    +    To profile all memory allocations, use -test.memprofilerate=1
    +    and pass --alloc_space flag to the pprof tool.
    +
    +-mutexprofile mutex.out
    +    Write a mutex contention profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-mutexprofilefraction n
    +    Sample 1 in n stack traces of goroutines holding a
    +    contended mutex.
    +
    +-outputdir directory
    +    Place output files from profiling in the specified directory,
    +    by default the directory in which "go test" is running.
    +
    +-trace trace.out
    +    Write an execution trace to the specified file before exiting.
    +
    +
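For illustration, a coverage profile can be written and then inspected with the cover tool:

	go test -covermode=count -coverprofile=cover.out
	go tool cover -html=cover.out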

    +Each of these flags is also recognized with an optional 'test.' prefix, +as in -test.v. When invoking the generated test binary (the result of +'go test -c') directly, however, the prefix is mandatory. +

    +

    +The 'go test' command rewrites or removes recognized flags, +as appropriate, both before and after the optional package list, +before invoking the test binary. +

    +

    +For instance, the command +

    +
    go test -v -myflag testdata -cpuprofile=prof.out -x
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
    +
    +

    +(The -x flag is removed because it applies only to the go command's +execution, not to the test itself.) +

    +

    +The test flags that generate profiles (other than for coverage) also +leave the test binary in pkg.test for use when analyzing the profiles. +

    +

    +When 'go test' runs a test binary, it does so from within the +corresponding package's source code directory. Depending on the test, +it may be necessary to do the same when invoking a generated test +binary directly. +

    +

    +The command-line package list, if present, must appear before any +flag not known to the go test command. Continuing the example above, +the package list would have to appear before -myflag, but could appear +on either side of -v. +

    +

    +To keep an argument for a test binary from being interpreted as a +known flag or a package name, use -args (see 'go help test') which +passes the remainder of the command line through to the test binary +uninterpreted and unaltered. +

    +

    +For instance, the command +

    +
    go test -v -args -x -v
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -x -v
    +
    +

    +Similarly, +

    +
    go test -args math
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test math
    +
    +

    +In the first example, the -x and the second -v are passed through to the +test binary unchanged and with no effect on the go command itself. +In the second example, the argument math is passed through to the test +binary, instead of being interpreted as the package list. +

    +

    Description of testing functions

    +

    +The 'go test' command expects to find test, benchmark, and example functions +in the "*_test.go" files corresponding to the package under test. +

    +

    +A test function is one named TestXXX (where XXX is any alphanumeric string +not starting with a lower case letter) and should have the signature, +

    +
    func TestXXX(t *testing.T) { ... }
    +
    +
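As a minimal sketch, assuming the package under test provides a hypothetical Reverse function, a complete test in a *_test.go file (which imports "testing") might look like:

	func TestReverse(t *testing.T) {
		// Reverse is a hypothetical function in the package under test.
		if got := Reverse("abc"); got != "cba" {
			t.Errorf(`Reverse("abc") = %q, want "cba"`, got)
		}
	}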

    +A benchmark function is one named BenchmarkXXX and should have the signature, +

    +
    func BenchmarkXXX(b *testing.B) { ... }
    +
    +

    +An example function is similar to a test function but, instead of using +*testing.T to report success or failure, prints output to os.Stdout. +If the last comment in the function starts with "Output:" then the output +is compared exactly against the comment (see examples below). If the last +comment begins with "Unordered output:" then the output is compared to the +comment, however the order of the lines is ignored. An example with no such +comment is compiled but not executed. An example with no text after +"Output:" is compiled, executed, and expected to produce no output. +

    +

    +Godoc displays the body of ExampleXXX to demonstrate the use
    +of the function, constant, or variable XXX. An example of a method M with
    +receiver type T or *T is named ExampleT_M. There may be multiple examples
    +for a given function, constant, or variable, distinguished by a trailing _xxx,
    +where xxx is a suffix not beginning with an upper case letter.
    +
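    As an illustration of this naming scheme (using the Len method of
    bytes.Buffer purely as a familiar stand-in), the first function below is the
    primary example for Buffer.Len and the second is an additional example
    distinguished by a lower-case suffix:

    	func ExampleBuffer_Len() { ... }
    	func ExampleBuffer_Len_empty() { ... }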

    +

    +Here is an example of an example:
    +

    +
    func ExamplePrintln() {
    +	Println("The output of\nthis example.")
    +	// Output: The output of
    +	// this example.
    +}
    +
    +

    +Here is another example where the ordering of the output is ignored:
    +

    +
    func ExamplePerm() {
    +	for _, value := range Perm(4) {
    +		fmt.Println(value)
    +	}
    +
    +	// Unordered output: 4
    +	// 2
    +	// 1
    +	// 3
    +	// 0
    +}
    +
    +

    +The entire test file is presented as the example when it contains a single
    +example function, at least one other function, type, variable, or constant
    +declaration, and no test or benchmark functions.
    +
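    A minimal sketch of such a whole-file example, loosely adapted from the
    standard sort examples (the byLength type and the fruit values are
    illustrative only): the file contains a single Example function plus one
    supporting type and no Test or Benchmark functions, so godoc presents the
    whole file.

    	package sort_test

    	import (
    		"fmt"
    		"sort"
    	)

    	// byLength sorts strings by their length.
    	type byLength []string

    	func (s byLength) Len() int           { return len(s) }
    	func (s byLength) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
    	func (s byLength) Less(i, j int) bool { return len(s[i]) < len(s[j]) }

    	func Example() {
    		fruits := []string{"peach", "banana", "kiwi"}
    		sort.Sort(byLength(fruits))
    		fmt.Println(fruits)
    		// Output: [kiwi peach banana]
    	}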

    +

    +See the documentation of the testing package for more information.
    +

    +
    +
    +
    +
    +
    + + + + + + + + +`)) diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 395e1c2f04..54726c2a3c 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -39,6 +39,7 @@ func NewEncoder(w io.Writer) *Encoder { tableSizeUpdate: false, w: w, } + e.dynTab.table.init() e.dynTab.setMaxSize(initialHeaderTableSize) return e } @@ -88,24 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error { // only name matches, i points to that index and nameValueMatch // becomes false. func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - for idx, hf := range staticTable { - if hf.Name != f.Name { - continue - } - if i == 0 { - i = uint64(idx + 1) - } - if f.Sensitive || hf.Value != f.Value { - continue - } - return uint64(idx + 1), true + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true } - j, nameValueMatch := e.dynTab.search(f) + j, nameValueMatch := e.dynTab.table.search(f) if nameValueMatch || (i == 0 && j != 0) { - i = j + uint64(len(staticTable)) + return j + uint64(staticTable.len()), nameValueMatch } - return + + return i, false } // SetMaxDynamicTableSize changes the dynamic header table size to v. diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index d18be440c5..176644acda 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod emit: emitFunc, emitEnabled: true, } + d.dynTab.table.init() d.dynTab.allowedMaxSize = maxDynamicTableSize d.dynTab.setMaxSize(maxDynamicTableSize) return d @@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { } type dynamicTable struct { - // ents is the FIFO described at // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - // The newest (low index) is append at the end, and items are - // evicted from the front. - ents []HeaderField - size uint32 + table headerFieldTable + size uint32 // in bytes maxSize uint32 // current maxSize allowedMaxSize uint32 // maxSize may go up to this, inclusive } @@ -169,77 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) { dt.evict() } -// TODO: change dynamicTable to be a struct with a slice and a size int field, -// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: -// -// -// Then make add increment the size. maybe the max size should move from Decoder to -// dynamicTable and add should return an ok bool if there was enough space. -// -// Later we'll need a remove operation on dynamicTable. - func (dt *dynamicTable) add(f HeaderField) { - dt.ents = append(dt.ents, f) + dt.table.addEntry(f) dt.size += f.Size() dt.evict() } -// If we're too big, evict old stuff (front of the slice) +// If we're too big, evict old stuff. func (dt *dynamicTable) evict() { - base := dt.ents // keep base pointer of slice - for dt.size > dt.maxSize { - dt.size -= dt.ents[0].Size() - dt.ents = dt.ents[1:] - } - - // Shift slice contents down if we evicted things. - if len(dt.ents) != len(base) { - copy(base, dt.ents) - dt.ents = base[:len(dt.ents)] + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ } -} - -// Search searches f in the table. The return value i is 0 if there is -// no name match. 
If there is name match or name/value match, i is the -// index of that entry (1-based). If both name and value match, -// nameValueMatch becomes true. -func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - l := len(dt.ents) - for j := l - 1; j >= 0; j-- { - ent := dt.ents[j] - if ent.Name != f.Name { - continue - } - if i == 0 { - i = uint64(l - j) - } - if f.Sensitive { - continue - } - if ent.Value != f.Value { - continue - } - return uint64(l - j), true - } - return i, false + dt.table.evictOldest(n) } func (d *Decoder) maxTableIndex() int { - return len(d.dynTab.ents) + len(staticTable) + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() } func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - if i < 1 { + // See Section 2.3.3. + if i == 0 { return } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } if i > uint64(d.maxTableIndex()) { return } - if i <= uint64(len(staticTable)) { - return staticTable[i-1], true - } - dents := d.dynTab.ents - return dents[len(dents)-(int(i)-len(staticTable))], true + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true } // Decode decodes an entire block. diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go index b9283a0233..a66cfbea69 100644 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -4,73 +4,200 @@ package hpack -func pair(name, value string) HeaderField { - return HeaderField{Name: name, Value: value} +import ( + "fmt" +) + +// headerFieldTable implements a list of HeaderFields. +// This is used to implement the static and dynamic tables. +type headerFieldTable struct { + // For static tables, entries are never evicted. + // + // For dynamic tables, entries are evicted from ents[0] and added to the end. + // Each entry has a unique id that starts at one and increments for each + // entry that is added. This unique id is stable across evictions, meaning + // it can be used as a pointer to a specific entry. As in hpack, unique ids + // are 1-based. The unique id for ents[k] is k + evictCount + 1. + // + // Zero is not a valid unique id. + // + // evictCount should not overflow in any remotely practical situation. In + // practice, we will have one dynamic table per HTTP/2 connection. If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". 
+ byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 } // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: 
"proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t } var huffmanCodes = [256]uint32{ diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go index efd2e1282c..508cebcc4d 100644 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -7,7 +7,6 @@ package http2 import ( - "crypto/tls" "net/http" "time" ) @@ -20,27 +19,3 @@ func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { return 0 } - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 0000000000..5ae07726b7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go19.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + // not supported prior to go1.9 + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 914aaf8a7f..a6140099cb 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -15,8 +15,8 @@ import ( // underlying buffer is an interface. (io.Pipe is always unbuffered) type pipe struct { mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading err error // read error once empty. non-nil means closed. breakErr error // immediate read error (caller doesn't see rest of b) donec chan struct{} // closed on error @@ -32,6 +32,9 @@ type pipeBuffer interface { func (p *pipe) Len() int { p.mu.Lock() defer p.mu.Unlock() + if p.b == nil { + return 0 + } return p.b.Len() } @@ -47,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { if p.breakErr != nil { return 0, p.breakErr } - if p.b.Len() > 0 { + if p.b != nil && p.b.Len() > 0 { return p.b.Read(d) } if p.err != nil { @@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { p.readFn() // e.g. 
copy trailers p.readFn = nil // not sticky like p.err } + p.b = nil return 0, p.err } p.c.Wait() @@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil { return 0, errClosedPipeWrite } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } return p.b.Write(d) } @@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) { return } p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } *dst = err p.closeDoneLocked() } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 3c641a8c24..7367b31c57 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -110,9 +110,41 @@ type Server struct { // activity for the purposes of IdleTimeout. IdleTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + // NewWriteScheduler constructs a write scheduler for a connection. // If nil, a default scheduler is chosen. NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. + state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 } func (s *Server) maxReadFrameSize() uint32 { @@ -129,6 +161,40 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. 
@@ -141,9 +207,13 @@ func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} if err := configureServer18(s, conf); err != nil { return err } + if err := configureServer19(s, conf); err != nil { + return err + } if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) @@ -255,35 +325,37 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { defer cancel() sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - wantStartPushCh: make(chan startPushRequest, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) // The net/http package sets the write deadline from the // http.Server.WriteTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here so that it is not applied to additional - // streams opened on this connection. - // TODO: implement WriteTimeout fully. See Issue 18437. + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. if sc.hs.WriteTimeout != 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -294,6 +366,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.writeSched = NewRandomWriteScheduler() } + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. 
sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) @@ -376,10 +451,9 @@ type serverConn struct { doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan readFrameResult // written by serverConn.readFrames wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve - wantStartPushCh chan startPushRequest // from handlers -> serve wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes bodyReadCh chan bodyReadMsg // from handlers -> serve - testHookCh chan func(int) // code to run on the serve loop + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop flow flow // conn-wide (not stream-specific) outbound flow control inflow flow // conn-wide inbound flow control tlsState *tls.ConnectionState // shared by all handlers, like net/http @@ -387,38 +461,39 @@ type serverConn struct { writeSched WriteScheduler // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? - clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream - initialWindowSize int32 - maxFrameSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started writing a frame (on serve goroutine or separate) - writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - inGoAway bool // we've started to or sent GOAWAY - inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimerCh <-chan time.Time // nil until used - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused - idleTimerCh <-chan time.Time // nil if unused + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once } func (sc *serverConn) maxHeaderListSize() uint32 { @@ -463,10 +538,10 @@ type stream struct { numTrailerValues int64 weight uint8 state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - reqBuf []byte // if non-nil, body pipe buffer to return later at EOF + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -696,15 +771,17 @@ func (sc *serverConn) serve() { {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) sc.unackedSettings++ + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. 
+ if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return @@ -717,27 +794,25 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() - sc.idleTimerCh = sc.idleTimer.C - } - - var gracefulShutdownCh <-chan struct{} - if sc.hs != nil { - gracefulShutdownCh = h1ServerShutdownChan(sc.hs) } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.NewTimer(firstSettingsTimeout) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + loopNum := 0 for { loopNum++ select { case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } sc.writeFrame(wr) - case spr := <-sc.wantStartPushCh: - sc.startPush(spr) case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: @@ -745,26 +820,37 @@ func (sc *serverConn) serve() { return } res.readMore() - if settingsTimer.C != nil { + if settingsTimer != nil { settingsTimer.Stop() - settingsTimer.C = nil + settingsTimer = nil } case m := <-sc.bodyReadCh: sc.noteBodyRead(m.st, m.n) - case <-settingsTimer.C: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case <-gracefulShutdownCh: - gracefulShutdownCh = nil - sc.startGracefulShutdown() - case <-sc.shutdownTimerCh: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case <-sc.idleTimerCh: - sc.vlogf("connection is idle") - sc.goAway(ErrCodeNo) - case fn := <-sc.testHookCh: - fn(loopNum) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } } if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { @@ -773,6 +859,36 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. 
+var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + // readPreface reads the ClientPreface greeting from the peer // or returns an error on timeout or an invalid greeting. func (sc *serverConn) readPreface() error { @@ -1014,7 +1130,11 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // stateClosed after the RST_STREAM frame is // written. st.state = stateHalfClosedLocal - sc.resetStream(streamError(st.id, ErrCodeCancel)) + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) case stateHalfClosedRemote: sc.closeStream(st, errHandlerComplete) } @@ -1086,10 +1206,19 @@ func (sc *serverConn) scheduleFrameWrite() { sc.inFrameScheduleLoop = false } -// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the -// client we're gracefully shutting down. The connection isn't closed -// until all current streams are done. +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +func (sc *serverConn) startGracefulShutdownInternal() { sc.goAwayIn(ErrCodeNo, 0) } @@ -1121,8 +1250,7 @@ func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.NewTimer(d) - sc.shutdownTimerCh = sc.shutdownTimer.C + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1305,6 +1433,9 @@ func (sc *serverConn) closeStream(st *stream, err error) { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } if st.isPushed() { sc.curPushedStreams-- } else { @@ -1317,7 +1448,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { - sc.startGracefulShutdown() + sc.startGracefulShutdownInternal() } } if p := st.body; p != nil { @@ -1395,9 +1526,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { // adjust the size of all stream flow control windows that it // maintains by the difference between the new value and the // old value." 
- old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative for _, st := range sc.streams { if !st.flow.add(growth) { // 6.9.2 Initial Flow Control Window Size @@ -1504,7 +1635,7 @@ func (sc *serverConn) processGoAway(f *GoAwayFrame) error { } else { sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) } - sc.startGracefulShutdown() + sc.startGracefulShutdownInternal() // http://tools.ietf.org/html/rfc7540#section-6.8 // We should not create any new streams, which means we should disable push. sc.pushEnabled = false @@ -1543,6 +1674,12 @@ func (st *stream) copyTrailersToHandlerRequest() { } } +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.StreamID @@ -1719,9 +1856,12 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream } st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } sc.streams[id] = st sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) @@ -1785,16 +1925,14 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, err } if bodyOpen { - st.reqBuf = getRequestBodyBuf() - req.Body.(*requestBody).pipe = &pipe{ - b: &fixedBuffer{buf: st.reqBuf}, - } - if vv, ok := rp.header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } } return rw, req, nil } @@ -1890,24 +2028,6 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r return rw, req, nil } -var reqBodyCache = make(chan []byte, 8) - -func getRequestBodyBuf() []byte { - select { - case b := <-reqBodyCache: - return b - default: - return make([]byte, initialWindowSize) - } -} - -func putRequestBodyBuf(b []byte) { - select { - case reqBodyCache <- b: - default: - } -} - // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { didPanic := true @@ -2003,12 +2123,6 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { case <-sc.doneServing: } } - if err == io.EOF { - if buf := st.reqBuf; buf != nil { - st.reqBuf = nil // shouldn't matter; field unused by other - putRequestBodyBuf(buf) - } - } } func (sc *serverConn) noteBodyRead(st *stream, n int) { @@ -2514,7 +2628,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error { return fmt.Errorf("method %q must be GET or HEAD", opts.Method) } - msg := startPushRequest{ + msg := &startPushRequest{ parent: st, method: opts.Method, url: u, @@ -2527,7 +2641,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error { return errClientDisconnected case <-st.cw: return errStreamClosed - case sc.wantStartPushCh <- msg: + case sc.serveMsgCh <- msg: } select { @@ -2549,7 +2663,7 @@ type startPushRequest struct { done chan error } -func (sc *serverConn) startPush(msg startPushRequest) { +func (sc *serverConn) startPush(msg *startPushRequest) { sc.serveG.check() // http://tools.ietf.org/html/rfc7540#section-6.6. @@ -2588,7 +2702,7 @@ func (sc *serverConn) startPush(msg startPushRequest) { // A server that is unable to establish a new stream identifier can send a GOAWAY // frame so that the client is forced to open a new connection for new streams. if sc.maxPushPromiseID+2 >= 1<<31 { - sc.startGracefulShutdown() + sc.startGracefulShutdownInternal() return 0, ErrPushLimitReached } sc.maxPushPromiseID += 2 @@ -2713,31 +2827,6 @@ var badTrailer = map[string]bool{ "Www-Authenticate": true, } -// h1ServerShutdownChan returns a channel that will be closed when the -// provided *http.Server wants to shut down. -// -// This is a somewhat hacky way to get at http1 innards. It works -// when the http2 code is bundled into the net/http package in the -// standard library. The alternatives ended up making the cmd/go tool -// depend on http Servers. This is the lightest option for now. -// This is tested via the TestServeShutdown* tests in net/http. -func h1ServerShutdownChan(hs *http.Server) <-chan struct{} { - if fn := testh1ServerShutdownChan; fn != nil { - return fn(hs) - } - var x interface{} = hs - type I interface { - getDoneChan() <-chan struct{} - } - if hs, ok := x.(I); ok { - return hs.getDoneChan() - } - return nil -} - -// optional test hook for h1ServerShutdownChan. -var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{} - // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // disabled. See comments on h1ServerShutdownChan above for why // the code is written this way. 
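A hedged sketch of how the new MaxUploadBufferPerConnection and
MaxUploadBufferPerStream fields added to http2.Server in the server.go hunk
above might be set by a caller; the address, certificate paths, and window
sizes here are illustrative assumptions, not part of the patch:

	package main

	import (
		"log"
		"net/http"

		"golang.org/x/net/http2"
	)

	func main() {
		srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
		h2 := &http2.Server{
			// Initial connection-level flow-control window (must be at least 64 KiB).
			MaxUploadBufferPerConnection: 1 << 20,
			// Initial per-stream flow-control window.
			MaxUploadBufferPerStream: 1 << 20,
		}
		if err := http2.ConfigureServer(srv, h2); err != nil {
			log.Fatal(err)
		}
		log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
	}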
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index fef8396867..3a85f25a20 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1528,8 +1528,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return res, nil } - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} cs.bytesRemain = res.ContentLength res.Body = transportResponseBody{cs} go cs.awaitRequestCancel(cs.req) @@ -1656,6 +1655,7 @@ func (b transportResponseBody) Close() error { cc.wmu.Lock() if !serverSentStreamEnd { cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true } // Return connection-level flow control. if unread > 0 { @@ -1703,12 +1703,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { return nil } if f.Length > 0 { - if len(data) > 0 && cs.bufPipe.b == nil { - // Data frame after it's already closed? - cc.logf("http2: Transport received DATA frame for closed stream; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // Check connection-level flow control. cc.mu.Lock() if cs.inflow.available() >= int32(f.Length) { diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 01132721bd..848fed6ec7 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -53,7 +53,7 @@ type PriorityWriteSchedulerConfig struct { } // NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3. +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. // If cfg is nil, default options are used. func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { if cfg == nil { diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/status/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 0000000000..40e79375bf --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +/* +Package status is a generated protocol buffer package. + +It is generated from these files: + google/rpc/status.proto + +It has these top-level messages: + Status +*/ +package status + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` which can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting purpose. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. 
+ Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + // A list of messages that carry the error details. There will be a + // common set of message types for APIs to use. + Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*google_protobuf.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, + 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index 9d943fbada..9af4ee70ac 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -35,6 +35,7 @@ package grpc import ( "fmt" + "net" "sync" "golang.org/x/net/context" @@ -60,6 +61,10 @@ type BalancerConfig struct { // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) } // BalancerGetOptions configures a Get call. 
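The Status message above is plain data: an integer code, a developer-facing message, and a repeated Any for typed details. A minimal sketch of building one with the ptypes helpers pulled in by this update; the Duration detail is only a placeholder payload, not something the status proto prescribes:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/ptypes"
        durpb "github.com/golang/protobuf/ptypes/duration"
        spb "google.golang.org/genproto/googleapis/rpc/status"
    )

    func main() {
        // Pack an arbitrary detail message; a Duration is used here purely as a
        // placeholder payload (e.g. a suggested retry delay).
        detail, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 30})
        if err != nil {
            panic(err)
        }
        st := &spb.Status{
            Code:    5, // 5 is NOT_FOUND in google.rpc.Code
            Message: "route not found",
        }
        st.Details = append(st.Details, detail)
        fmt.Println(st.GetCode(), st.GetMessage(), len(st.GetDetails()))
    }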
@@ -385,6 +390,9 @@ func (rr *roundRobin) Notify() <-chan []Address { func (rr *roundRobin) Close() error { rr.mu.Lock() defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } rr.done = true if rr.w != nil { rr.w.Close() diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go index 199bbe1fa3..dfe8c8f0fe 100644 --- a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go +++ b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go @@ -37,6 +37,7 @@ import ( "math" "runtime" "sync" + "syscall" "time" "golang.org/x/net/context" @@ -85,6 +86,7 @@ type benchmarkClient struct { lastResetTime time.Time histogramOptions stats.HistogramOptions lockingHistograms []lockingHistogram + rusageLastReset *syscall.Rusage } func printClientConfig(config *testpb.ClientConfig) { @@ -226,6 +228,9 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) return nil, err } + rusage := new(syscall.Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + rpcCountPerConn := int(config.OutstandingRpcsPerChannel) bc := &benchmarkClient{ histogramOptions: stats.HistogramOptions{ @@ -236,9 +241,10 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) }, lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns), rpcCountPerConn*len(conns)), - stop: make(chan bool), - lastResetTime: time.Now(), - closeConns: closeConns, + stop: make(chan bool), + lastResetTime: time.Now(), + closeConns: closeConns, + rusageLastReset: rusage, } if err = performRPCs(config, conns, bc); err != nil { @@ -338,8 +344,9 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou // getStats returns the stats for benchmark client. // It resets lastResetTime and all histograms if argument reset is true. func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { - var timeElapsed float64 + var wallTimeElapsed, uTimeElapsed, sTimeElapsed float64 mergedHistogram := stats.NewHistogram(bc.histogramOptions) + latestRusage := new(syscall.Rusage) if reset { // Merging histogram may take some time. @@ -353,14 +360,21 @@ func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { mergedHistogram.Merge(toMerge[i]) } - timeElapsed = time.Since(bc.lastResetTime).Seconds() + wallTimeElapsed = time.Since(bc.lastResetTime).Seconds() + syscall.Getrusage(syscall.RUSAGE_SELF, latestRusage) + uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage) + + bc.rusageLastReset = latestRusage bc.lastResetTime = time.Now() } else { // Merge only, not reset. 
for i := range bc.lockingHistograms { bc.lockingHistograms[i].mergeInto(mergedHistogram) } - timeElapsed = time.Since(bc.lastResetTime).Seconds() + + wallTimeElapsed = time.Since(bc.lastResetTime).Seconds() + syscall.Getrusage(syscall.RUSAGE_SELF, latestRusage) + uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage) } b := make([]uint32, len(mergedHistogram.Buckets), len(mergedHistogram.Buckets)) @@ -376,9 +390,9 @@ func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { SumOfSquares: float64(mergedHistogram.SumOfSquares), Count: float64(mergedHistogram.Count), }, - TimeElapsed: timeElapsed, - TimeUser: 0, - TimeSystem: 0, + TimeElapsed: wallTimeElapsed, + TimeUser: uTimeElapsed, + TimeSystem: sTimeElapsed, } } diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go index 667ef2c17d..0d20581df4 100644 --- a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go +++ b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go @@ -38,6 +38,7 @@ import ( "strconv" "strings" "sync" + "syscall" "time" "google.golang.org/grpc" @@ -55,11 +56,12 @@ var ( ) type benchmarkServer struct { - port int - cores int - closeFunc func() - mu sync.RWMutex - lastResetTime time.Time + port int + cores int + closeFunc func() + mu sync.RWMutex + lastResetTime time.Time + rusageLastReset *syscall.Rusage } func printServerConfig(config *testpb.ServerConfig) { @@ -156,18 +158,35 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma grpclog.Fatalf("failed to get port number from server address: %v", err) } - return &benchmarkServer{port: p, cores: numOfCores, closeFunc: closeFunc, lastResetTime: time.Now()}, nil + rusage := new(syscall.Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + + return &benchmarkServer{ + port: p, + cores: numOfCores, + closeFunc: closeFunc, + lastResetTime: time.Now(), + rusageLastReset: rusage, + }, nil } // getStats returns the stats for benchmark server. // It resets lastResetTime if argument reset is true. func (bs *benchmarkServer) getStats(reset bool) *testpb.ServerStats { - // TODO wall time, sys time, user time. 
bs.mu.RLock() defer bs.mu.RUnlock() - timeElapsed := time.Since(bs.lastResetTime).Seconds() + wallTimeElapsed := time.Since(bs.lastResetTime).Seconds() + rusageLatest := new(syscall.Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusageLatest) + uTimeElapsed, sTimeElapsed := cpuTimeDiff(bs.rusageLastReset, rusageLatest) + if reset { bs.lastResetTime = time.Now() + bs.rusageLastReset = rusageLatest + } + return &testpb.ServerStats{ + TimeElapsed: wallTimeElapsed, + TimeUser: uTimeElapsed, + TimeSystem: sTimeElapsed, } - return &testpb.ServerStats{TimeElapsed: timeElapsed, TimeUser: 0, TimeSystem: 0} } diff --git a/vendor/google.golang.org/grpc/benchmark/worker/main.go b/vendor/google.golang.org/grpc/benchmark/worker/main.go index 17c52519bb..8a80406202 100644 --- a/vendor/google.golang.org/grpc/benchmark/worker/main.go +++ b/vendor/google.golang.org/grpc/benchmark/worker/main.go @@ -38,6 +38,8 @@ import ( "fmt" "io" "net" + "net/http" + _ "net/http/pprof" "runtime" "strconv" "time" @@ -50,8 +52,10 @@ import ( ) var ( - driverPort = flag.Int("driver_port", 10000, "port for communication with driver") - serverPort = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message") + driverPort = flag.Int("driver_port", 10000, "port for communication with driver") + serverPort = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message") + pprofPort = flag.Int("pprof_port", -1, "Port for pprof debug server to listen on. Pprof server doesn't start if unset") + blockProfRate = flag.Int("block_prof_rate", 0, "fraction of goroutine blocking events to report in blocking profile") ) type byteBufCodec struct { @@ -227,5 +231,14 @@ func main() { s.Stop() }() + runtime.SetBlockProfileRate(*blockProfRate) + + if *pprofPort >= 0 { + go func() { + grpclog.Println("Starting pprof server on port " + strconv.Itoa(*pprofPort)) + grpclog.Println(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil)) + }() + } + s.Serve(lis) } diff --git a/vendor/google.golang.org/grpc/benchmark/worker/util.go b/vendor/google.golang.org/grpc/benchmark/worker/util.go index f0016ce49f..6f9b2b03fd 100644 --- a/vendor/google.golang.org/grpc/benchmark/worker/util.go +++ b/vendor/google.golang.org/grpc/benchmark/worker/util.go @@ -36,6 +36,7 @@ import ( "log" "os" "path/filepath" + "syscall" ) // abs returns the absolute path the given relative file or directory path, @@ -52,6 +53,20 @@ func abs(rel string) string { return filepath.Join(v, rel) } +func cpuTimeDiff(first *syscall.Rusage, latest *syscall.Rusage) (float64, float64) { + var ( + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + func goPackagePath(pkg string) (path string, err error) { gp := os.Getenv("GOPATH") if gp == "" { diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 81b52be294..e73812fd18 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -36,7 +36,6 @@ package grpc import ( "bytes" "io" - "math" "time" "golang.org/x/net/context" @@ -44,6 +43,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" + 
"google.golang.org/grpc/status" "google.golang.org/grpc/transport" ) @@ -73,14 +73,17 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran } } for { - if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil { + if c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil { if err == io.EOF { break } return } } - if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK { + if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK { // TODO in the current implementation, inTrailer may be handled before inPayload in some cases. // Fix the order if necessary. dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) @@ -93,11 +96,7 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran } // sendRequest writes out various information of an RPC such as Context and Message. -func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { - stream, err := t.NewStream(ctx, callHdr) - if err != nil { - return nil, err - } +func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { defer func() { if err != nil { // If err is connection error, t will be closed, no need to close stream here. @@ -120,7 +119,13 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, } outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload) if err != nil { - return nil, Errorf(codes.Internal, "grpc: %v", err) + return err + } + if c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(outBuf) > *c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(outBuf), *c.maxSendMessageSize) } err = t.Write(stream, outBuf, opts) if err == nil && outPayload != nil { @@ -131,10 +136,10 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following // recvResponse to get the final status. if err != nil && err != io.EOF { - return nil, err + return err } // Sent successfully. - return stream, nil + return nil } // Invoke sends the RPC request on the wire and returns after response is received. 
@@ -149,14 +154,18 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { c := defaultCallInfo - if mc, ok := cc.getMethodConfig(method); ok { - c.failFast = !mc.WaitForReady - if mc.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, mc.Timeout) - defer cancel() - } + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + if mc.Timeout != nil && *mc.Timeout >= 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer cancel() } + + opts = append(cc.dopts.callOptions, opts...) for _, o := range opts { if err := o.before(&c); err != nil { return toRPCErr(err) @@ -167,6 +176,10 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli o.after(&c) } }() + + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if EnableTracing { c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) defer c.traceInfo.tr.Finish() @@ -183,26 +196,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } }() } + ctx = newContextWithRPCInfo(ctx) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ Client: true, EndTime: time.Now(), Error: e, } sh.HandleRPC(ctx, end) - } - }() + }() + } topts := &transport.Options{ Last: true, Delay: false, @@ -224,6 +236,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } + if c.creds != nil { + callHdr.Creds = c.creds + } gopts := BalancerGetOptions{ BlockingWait: !c.failFast, @@ -231,7 +246,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli t, put, err = cc.getTransport(ctx, gopts) if err != nil { // TODO(zhaoq): Probably revisit the error handling. - if _, ok := err.(*rpcError); ok { + if _, ok := status.FromError(err); ok { return err } if err == errConnClosing || err == errConnUnavailable { @@ -246,19 +261,35 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) } - stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts) + stream, err = t.NewStream(ctx, callHdr) + if err != nil { + if put != nil { + if _, ok := err.(transport.ConnectionError); ok { + // If error is connection error, transport was sending data on wire, + // and we are not sure if anything has been sent on wire. + // If error is not connection error, we are sure nothing has been sent. 
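With call.go now returning stream.Status().Err() and testing errors via status.FromError instead of the old *rpcError assertion, callers inspect failures through the status package. A hedged sketch against the route_guide example client; the particular codes handled are illustrative:

    package rgclient

    import (
        "log"

        "golang.org/x/net/context"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/examples/route_guide/routeguide"
        "google.golang.org/grpc/status"
    )

    // getFeature sketches caller-side error handling now that client errors
    // carry a status rather than the old StatusCode/StatusDesc pair.
    func getFeature(ctx context.Context, client routeguide.RouteGuideClient) {
        feature, err := client.GetFeature(ctx, &routeguide.Point{Latitude: 407838351, Longitude: -746143763})
        if err != nil {
            if st, ok := status.FromError(err); ok {
                switch st.Code() {
                case codes.NotFound:
                    log.Printf("no feature there: %s", st.Message())
                case codes.DeadlineExceeded:
                    log.Print("call timed out")
                default:
                    log.Printf("rpc failed: %v", st.Err())
                }
            } else {
                log.Printf("non-status error: %v", err)
            }
            return
        }
        log.Println("feature:", feature.GetName())
    }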
+ updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) + } + put() + } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return toRPCErr(err) + } + err = sendRequest(ctx, cc.dopts, cc.dopts.cp, &c, callHdr, stream, t, args, topts) if err != nil { if put != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) put() - put = nil } // Retry a non-failfast RPC when // i) there is a connection error; or // ii) the server started to drain before this RPC was initiated. - if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return toRPCErr(err) - } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue } return toRPCErr(err) @@ -266,13 +297,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli err = recvResponse(ctx, cc.dopts, t, &c, stream, reply) if err != nil { if put != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) put() - put = nil } - if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return toRPCErr(err) - } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue } return toRPCErr(err) @@ -282,9 +313,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } t.CloseStream(stream, nil) if put != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) put() - put = nil } - return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) + return stream.Status().Err() } } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 459ce0b641..04c16526ee 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -45,6 +45,7 @@ import ( "golang.org/x/net/trace" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/stats" "google.golang.org/grpc/transport" ) @@ -55,8 +56,7 @@ var ( ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the ClientConn cannot establish the // underlying connections within the specified timeout. - // DEPRECATED: Please use context.DeadlineExceeded instead. This error will be - // removed in Q1 2017. + // DEPRECATED: Please use context.DeadlineExceeded instead. ErrClientConnTimeout = errors.New("grpc: timed out when dialing") // errNoTransportSecurity indicates that there is no transport security @@ -78,7 +78,8 @@ var ( errConnClosing = errors.New("grpc: the connection is closing") // errConnUnavailable indicates that the connection is unavailable. errConnUnavailable = errors.New("grpc: the connection is unavailable") - errNoAddr = errors.New("grpc: there is no address available to dial") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second ) @@ -86,23 +87,57 @@ var ( // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - codec Codec - cp Compressor - dc Decompressor - bs backoffStrategy - balancer Balancer - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - copts transport.ConnectOptions + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + codec Codec + cp Compressor + dc Decompressor + bs backoffStrategy + balancer Balancer + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + copts transport.ConnectOptions + callOptions []CallOption } +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = 1024 * 1024 * 4 +) + // DialOption configures how we set up the connection. type DialOption func(*dialOptions) +// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialWindowSize = s + } +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + } +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + } +} + // WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. func WithCodec(c Codec) DialOption { return func(o *dialOptions) { @@ -194,7 +229,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption } // WithPerRPCCredentials returns a DialOption which sets -// credentials which will place auth state on each outbound RPC. +// credentials and places auth state on each outbound RPC. func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return func(o *dialOptions) { o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) @@ -231,7 +266,7 @@ func WithStatsHandler(h stats.Handler) DialOption { } } -// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors. +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. // If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network // address and won't try to reconnect. // The default value of FailOnNonTempDialError is false. @@ -249,6 +284,13 @@ func WithUserAgent(s string) DialOption { } } +// WithKeepaliveParams returns a DialOption that specifies keepalive paramaters for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + return func(o *dialOptions) { + o.copts.KeepaliveParams = kp + } +} + // WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. 
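Several of the DialOptions introduced above move per-message limits into CallOptions and expose HTTP/2 window and keepalive tuning. A minimal client sketch wiring them together; the target address and the particular values are placeholders:

    package main

    import (
        "log"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/keepalive"
    )

    func main() {
        conn, err := grpc.Dial(
            "localhost:50051", // placeholder target
            grpc.WithInsecure(),
            // Per-call defaults replace the deprecated WithMaxMsgSize.
            grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(8*1024*1024)),
            // HTTP/2 flow-control windows; values below 64K are ignored.
            grpc.WithInitialWindowSize(1<<20),
            grpc.WithInitialConnWindowSize(1<<20),
            // Client-side keepalive pings.
            grpc.WithKeepaliveParams(keepalive.ClientParameters{
                Time:                30 * time.Second,
                Timeout:             10 * time.Second,
                PermitWithoutStream: true,
            }),
        )
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }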
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { return func(o *dialOptions) { @@ -278,19 +320,35 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { } // DialContext creates a client connection to the given target. ctx can be used to -// cancel or expire the pending connecting. Once this function returns, the +// cancel or expire the pending connection. Once this function returns, the // cancellation and expiration of ctx will be noop. Users should call ClientConn.Close // to terminate all the pending operations after this function returns. -// This is the EXPERIMENTAL API. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[Address]*addrConn), } cc.ctx, cc.cancel = context.WithCancel(context.Background()) + for _, opt := range opts { opt(&cc.dopts) } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + return dialContext(ctx, "tcp", addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + if cc.dopts.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) @@ -309,15 +367,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() + scSet := false if cc.dopts.scChan != nil { - // Wait for the initial service config. + // Try to get an initial service config. select { case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = sc + scSet = true } - case <-ctx.Done(): - return nil, ctx.Err() + default: } } // Set defaults. @@ -333,53 +392,44 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { cc.authority = cc.dopts.copts.Authority } else { - colonPos := strings.LastIndex(target, ":") - if colonPos == -1 { - colonPos = len(target) - } - cc.authority = target[:colonPos] + cc.authority = target } - var ok bool waitC := make(chan error, 1) go func() { - var addrs []Address + defer close(waitC) if cc.dopts.balancer == nil && cc.sc.LB != nil { cc.dopts.balancer = cc.sc.LB } - if cc.dopts.balancer == nil { - // Connect to target directly if balancer is nil. - addrs = append(addrs, Address{Addr: target}) - } else { + if cc.dopts.balancer != nil { var credsClone credentials.TransportCredentials if creds != nil { credsClone = creds.Clone() } config := BalancerConfig{ DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, } if err := cc.dopts.balancer.Start(target, config); err != nil { waitC <- err return } ch := cc.dopts.balancer.Notify() - if ch == nil { - // There is no name resolver installed. - addrs = append(addrs, Address{Addr: target}) - } else { - addrs, ok = <-ch - if !ok || len(addrs) == 0 { - waitC <- errNoAddr - return + if ch != nil { + if cc.dopts.block { + doneChan := make(chan struct{}) + go cc.lbWatcher(doneChan) + <-doneChan + } else { + go cc.lbWatcher(nil) } - } - } - for _, a := range addrs { - if err := cc.resetAddrConn(a, false, nil); err != nil { - waitC <- err return } } - close(waitC) + // No balancer, or no resolver within the balancer. Connect directly. 
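DialContext above now defers the blocking behaviour to lbWatcher when a balancer is configured: with WithBlock it waits for the first established connection, otherwise it returns immediately and connects in the background. A sketch under the assumption that the application already has some naming.Resolver implementation of its own:

    package lbdial

    import (
        "time"

        "golang.org/x/net/context"
        "google.golang.org/grpc"
        "google.golang.org/grpc/naming"
    )

    // dialBlocking assumes r is a naming.Resolver supplied by the application;
    // the resolver itself is outside the scope of this sketch.
    func dialBlocking(r naming.Resolver) (*grpc.ClientConn, error) {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()
        // With a balancer and WithBlock, DialContext returns only after the
        // lbWatcher has made the first successful connection (or ctx expires).
        return grpc.DialContext(ctx, "my-service", // placeholder target handed to r.Resolve
            grpc.WithInsecure(),
            grpc.WithBlock(),
            grpc.WithBalancer(grpc.RoundRobin(r)),
        )
    }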
+ if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil { + waitC <- err + return + } }() select { case <-ctx.Done(): @@ -389,16 +439,21 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * return nil, err } } - - // If balancer is nil or balancer.Notify() is nil, ok will be false here. - // The lbWatcher goroutine will not be created. - if ok { - go cc.lbWatcher() + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } } - if cc.dopts.scChan != nil { go cc.scWatcher() } + return cc, nil } @@ -447,9 +502,14 @@ type ClientConn struct { mu sync.RWMutex sc ServiceConfig conns map[Address]*addrConn + // Keepalive parameter can be udated if a GoAway is received. + mkp keepalive.ClientParameters } -func (cc *ClientConn) lbWatcher() { +// lbWatcher watches the Notify channel of the balancer in cc and manages +// connections accordingly. If doneChan is not nil, it is closed after the +// first successfull connection is made. +func (cc *ClientConn) lbWatcher(doneChan chan struct{}) { for addrs := range cc.dopts.balancer.Notify() { var ( add []Address // Addresses need to setup connections. @@ -476,7 +536,15 @@ func (cc *ClientConn) lbWatcher() { } cc.mu.Unlock() for _, a := range add { - cc.resetAddrConn(a, true, nil) + if doneChan != nil { + err := cc.resetAddrConn(a, true, nil) + if err == nil { + close(doneChan) + doneChan = nil + } + } else { + cc.resetAddrConn(a, false, nil) + } } for _, c := range del { c.tearDown(errConnDrain) @@ -505,12 +573,15 @@ func (cc *ClientConn) scWatcher() { // resetAddrConn creates an addrConn for addr and adds it to cc.conns. // If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. // If tearDownErr is nil, errConnDrain will be used instead. -func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error { +func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error { ac := &addrConn{ cc: cc, addr: addr, dopts: cc.dopts, } + cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = cc.mkp + cc.mu.RUnlock() ac.ctx, ac.cancel = context.WithCancel(cc.ctx) ac.stateCV = sync.NewCond(&ac.mu) if EnableTracing { @@ -555,8 +626,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err stale.tearDown(tearDownErr) } } - // skipWait may overwrite the decision in ac.dopts.block. - if ac.dopts.block && !skipWait { + if block { if err := ac.resetTransport(false); err != nil { if err != errConnClosing { // Tear down ac and delete it from cc.conns. @@ -589,12 +659,23 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err return nil } -// TODO: Avoid the locking here. -func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) { +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the serivce, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. 
cc.mu.RLock() defer cc.mu.RUnlock() - m, ok = cc.sc.Methods[method] - return + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m, _ = cc.sc.Methods[method[:i+1]] + } + return m } func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { @@ -635,6 +716,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) } if !ok { if put != nil { + updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) put() } return nil, nil, errConnClosing @@ -642,6 +724,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) if err != nil { if put != nil { + updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) put() } return nil, nil, err @@ -693,6 +776,20 @@ type addrConn struct { tearDownErr error } +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.TooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + // printf records an event in ac's event log, unless ac has been closed. // REQUIRES ac.mu is held. func (ac *addrConn) printf(format string, a ...interface{}) { @@ -777,6 +874,8 @@ func (ac *addrConn) resetTransport(closeTransport bool) error { Metadata: ac.addr.Metadata, } newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts) + // Don't call cancel in success path due to a race in Go 1.6: + // https://github.com/golang/go/issues/15078. if err != nil { cancel() @@ -799,11 +898,14 @@ func (ac *addrConn) resetTransport(closeTransport bool) error { } ac.mu.Unlock() closeTransport = false + timer := time.NewTimer(sleepTime - time.Since(connectTime)) select { - case <-time.After(sleepTime - time.Since(connectTime)): + case <-timer.C: case <-ac.ctx.Done(): + timer.Stop() return ac.ctx.Err() } + timer.Stop() continue } ac.mu.Lock() @@ -847,6 +949,7 @@ func (ac *addrConn) transportMonitor() { } return case <-t.GoAway(): + ac.adjustParams(t.GetGoAwayReason()) // If GoAway happens without any network I/O error, ac is closed without shutting down the // underlying transport (the transport will be closed when all the pending RPCs finished or // failed.). @@ -855,9 +958,9 @@ func (ac *addrConn) transportMonitor() { // In both cases, a new ac is created. select { case <-t.Error(): - ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) + ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) default: - ac.cc.resetAddrConn(ac.addr, true, errConnDrain) + ac.cc.resetAddrConn(ac.addr, false, errConnDrain) } return case <-t.Error(): @@ -866,7 +969,8 @@ func (ac *addrConn) transportMonitor() { t.Close() return case <-t.GoAway(): - ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) + ac.adjustParams(t.GetGoAwayReason()) + ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) return default: } diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 0000000000..001804d37d --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,119 @@ +/* +* + * Copyright 2014, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * +*/ + +package grpc + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" +) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. The returned + // string will be used as part of content type in transmission. + String() string +} + +// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. 
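codec.go above formalizes the Codec interface and makes protoCodec, backed by a sync.Pool of proto.Buffers, the default. A toy alternative codec, purely illustrative, showing what an implementation must provide and how a client selects it; a real deployment would need the matching codec installed on the server as well (via the CustomCodec server option):

    package jsoncodec

    import (
        "encoding/json"

        "google.golang.org/grpc"
    )

    // Codec is a toy grpc.Codec that serializes messages as JSON instead of
    // protobuf; both ends of the connection must agree on the codec.
    type Codec struct{}

    func (Codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
    func (Codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
    func (Codec) String() string                             { return "json" }

    // Dial shows the client side of installing the codec.
    func Dial(target string) (*grpc.ClientConn, error) {
        return grpc.Dial(target, grpc.WithInsecure(), grpc.WithCodec(Codec{}))
    }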
+type protoCodec struct { +} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (p protoCodec) Marshal(v interface{}) ([]byte, error) { + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := p.marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (p protoCodec) Unmarshal(data []byte, v interface{}) error { + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + v.(proto.Message).Reset() + err := cb.Unmarshal(v.(proto.Message)) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (protoCodec) String() string { + return "proto" +} + +var ( + protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, + } +) diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index e14b464acf..dcae5cc260 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -44,7 +44,7 @@ const ( // OK is returned on success. OK Code = 0 - // Canceled indicates the operation was cancelled (typically by the caller). + // Canceled indicates the operation was canceled (typically by the caller). Canceled Code = 1 // Unknown error. An example of where this error may be returned is diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 4d45c3e3c7..b16dbd6f45 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -102,6 +102,10 @@ type TransportCredentials interface { // authentication protocol on rawConn for clients. It returns the authenticated // connection and the corresponding auth information about the connection. // Implementations must use the provided context to implement timely cancellation. + // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about @@ -192,14 +196,14 @@ func NewTLS(c *tls.Config) TransportCredentials { return tc } -// NewClientTLSFromCert constructs a TLS from the input certificate for client. +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. 
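The reworded constructor comments above all describe functions that return TransportCredentials. A minimal client sketch using the file-based constructor; the certificate path and server address are placeholders:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
    )

    func main() {
        // "ca.pem" is a placeholder path to the CA that signed the server cert.
        creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
        if err != nil {
            log.Fatalf("load credentials: %v", err)
        }
        conn, err := grpc.Dial("myservice.example.com:443", grpc.WithTransportCredentials(creds))
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }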
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } -// NewClientTLSFromFile constructs a TLS from the input certificate file for client. +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { @@ -214,12 +218,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil } -// NewServerTLSFromCert constructs a TLS from the input certificate for server. +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } -// NewServerTLSFromFile constructs a TLS from the input certificate file and key +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key // file for server. func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index 25393cc641..126bc7847a 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -37,6 +37,7 @@ package oauth import ( "fmt" "io/ioutil" + "sync" "golang.org/x/net/context" "golang.org/x/oauth2" @@ -132,20 +133,27 @@ func NewComputeEngine() credentials.PerRPCCredentials { // serviceAccount represents PerRPCCredentials via JWT signing key. 
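The serviceAccount credentials above now cache the signed JWT token under a mutex and refresh it only when it expires, instead of minting a new token per RPC. A sketch of wiring them into a connection; the key file, scope, certificate path, and address are placeholders, and per-RPC credentials require transport security, so they are paired with TLS:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/credentials/oauth"
    )

    func main() {
        rpcCreds, err := oauth.NewServiceAccountFromFile("service-account.json",
            "https://www.googleapis.com/auth/cloud-platform")
        if err != nil {
            log.Fatalf("load service account: %v", err)
        }
        tlsCreds, err := credentials.NewClientTLSFromFile("ca.pem", "")
        if err != nil {
            log.Fatalf("load tls credentials: %v", err)
        }
        conn, err := grpc.Dial("myservice.example.com:443",
            grpc.WithTransportCredentials(tlsCreds),
            grpc.WithPerRPCCredentials(rpcCreds),
        )
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }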
type serviceAccount struct { + mu sync.Mutex config *jwt.Config -} - -func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - token, err := s.config.TokenSource(ctx).Token() - if err != nil { - return nil, err + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } } return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, + "authorization": s.t.TokenType + " " + s.t.AccessToken, }, nil } -func (s serviceAccount) RequireTransportSecurity() bool { +func (s *serviceAccount) RequireTransportSecurity() bool { return true } @@ -156,7 +164,7 @@ func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerR if err != nil { return nil, err } - return serviceAccount{config: config}, nil + return &serviceAccount{config: config}, nil } // NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file diff --git a/vendor/google.golang.org/grpc/examples/helloworld/mock/mock_helloworld/hw_mock.go b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go similarity index 100% rename from vendor/google.golang.org/grpc/examples/helloworld/mock/mock_helloworld/hw_mock.go rename to vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go diff --git a/vendor/google.golang.org/grpc/examples/route_guide/client/client.go b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go index fff6398d4d..b43420d81e 100644 --- a/vendor/google.golang.org/grpc/examples/route_guide/client/client.go +++ b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go @@ -34,7 +34,7 @@ // Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // -// It interacts with the route guide service whose definition can be found in proto/route_guide.proto. +// It interacts with the route guide service whose definition can be found in routeguide/route_guide.proto. package main import ( diff --git a/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go new file mode 100644 index 0000000000..328c929fa8 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go @@ -0,0 +1,200 @@ +// Automatically generated by MockGen. DO NOT EDIT! 
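The new mock_routeguide package that follows is MockGen output for the RouteGuideClient and RouteGuide_RouteChatClient interfaces. A sketch of how a test might drive it, assuming the github.com/golang/mock/gomock package; the expectation and return values are illustrative:

    package routeguide_test

    import (
        "testing"

        "github.com/golang/mock/gomock"
        "golang.org/x/net/context"
        rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide"
        "google.golang.org/grpc/examples/route_guide/routeguide"
    )

    func TestGetFeatureWithMock(t *testing.T) {
        ctrl := gomock.NewController(t)
        defer ctrl.Finish()

        client := rgmock.NewMockRouteGuideClient(ctrl)
        client.EXPECT().
            GetFeature(gomock.Any(), gomock.Any()).
            Return(&routeguide.Feature{Name: "Berkshire Valley"}, nil)

        // Code under test only ever sees the RouteGuideClient interface.
        feature, err := client.GetFeature(context.Background(), &routeguide.Point{})
        if err != nil || feature.GetName() != "Berkshire Valley" {
            t.Fatalf("unexpected result: %v, %v", feature, err)
        }
    }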
+// Source: google.golang.org/grpc/examples/route_guide/routeguide (interfaces: RouteGuideClient,RouteGuide_RouteChatClient) + +package mock_routeguide + +import ( + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + routeguide "google.golang.org/grpc/examples/route_guide/routeguide" + metadata "google.golang.org/grpc/metadata" +) + +// Mock of RouteGuideClient interface +type MockRouteGuideClient struct { + ctrl *gomock.Controller + recorder *_MockRouteGuideClientRecorder +} + +// Recorder for MockRouteGuideClient (not exported) +type _MockRouteGuideClientRecorder struct { + mock *MockRouteGuideClient +} + +func NewMockRouteGuideClient(ctrl *gomock.Controller) *MockRouteGuideClient { + mock := &MockRouteGuideClient{ctrl: ctrl} + mock.recorder = &_MockRouteGuideClientRecorder{mock} + return mock +} + +func (_m *MockRouteGuideClient) EXPECT() *_MockRouteGuideClientRecorder { + return _m.recorder +} + +func (_m *MockRouteGuideClient) GetFeature(_param0 context.Context, _param1 *routeguide.Point, _param2 ...grpc.CallOption) (*routeguide.Feature, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "GetFeature", _s...) + ret0, _ := ret[0].(*routeguide.Feature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) GetFeature(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "GetFeature", _s...) +} + +func (_m *MockRouteGuideClient) ListFeatures(_param0 context.Context, _param1 *routeguide.Rectangle, _param2 ...grpc.CallOption) (routeguide.RouteGuide_ListFeaturesClient, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "ListFeatures", _s...) + ret0, _ := ret[0].(routeguide.RouteGuide_ListFeaturesClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) ListFeatures(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "ListFeatures", _s...) +} + +func (_m *MockRouteGuideClient) RecordRoute(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RecordRouteClient, error) { + _s := []interface{}{_param0} + for _, _x := range _param1 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "RecordRoute", _s...) + ret0, _ := ret[0].(routeguide.RouteGuide_RecordRouteClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) RecordRoute(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0}, arg1...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "RecordRoute", _s...) +} + +func (_m *MockRouteGuideClient) RouteChat(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RouteChatClient, error) { + _s := []interface{}{_param0} + for _, _x := range _param1 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "RouteChat", _s...) + ret0, _ := ret[0].(routeguide.RouteGuide_RouteChatClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) RouteChat(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0}, arg1...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "RouteChat", _s...) 
+} + +// Mock of RouteGuide_RouteChatClient interface +type MockRouteGuide_RouteChatClient struct { + ctrl *gomock.Controller + recorder *_MockRouteGuide_RouteChatClientRecorder +} + +// Recorder for MockRouteGuide_RouteChatClient (not exported) +type _MockRouteGuide_RouteChatClientRecorder struct { + mock *MockRouteGuide_RouteChatClient +} + +func NewMockRouteGuide_RouteChatClient(ctrl *gomock.Controller) *MockRouteGuide_RouteChatClient { + mock := &MockRouteGuide_RouteChatClient{ctrl: ctrl} + mock.recorder = &_MockRouteGuide_RouteChatClientRecorder{mock} + return mock +} + +func (_m *MockRouteGuide_RouteChatClient) EXPECT() *_MockRouteGuide_RouteChatClientRecorder { + return _m.recorder +} + +func (_m *MockRouteGuide_RouteChatClient) CloseSend() error { + ret := _m.ctrl.Call(_m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) CloseSend() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "CloseSend") +} + +func (_m *MockRouteGuide_RouteChatClient) Context() context.Context { + ret := _m.ctrl.Call(_m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Context() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Context") +} + +func (_m *MockRouteGuide_RouteChatClient) Header() (metadata.MD, error) { + ret := _m.ctrl.Call(_m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Header() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Header") +} + +func (_m *MockRouteGuide_RouteChatClient) Recv() (*routeguide.RouteNote, error) { + ret := _m.ctrl.Call(_m, "Recv") + ret0, _ := ret[0].(*routeguide.RouteNote) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Recv() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Recv") +} + +func (_m *MockRouteGuide_RouteChatClient) RecvMsg(_param0 interface{}) error { + ret := _m.ctrl.Call(_m, "RecvMsg", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "RecvMsg", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) Send(_param0 *routeguide.RouteNote) error { + ret := _m.ctrl.Call(_m, "Send", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Send(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Send", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) SendMsg(_param0 interface{}) error { + ret := _m.ctrl.Call(_m, "SendMsg", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) SendMsg(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SendMsg", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) Trailer() metadata.MD { + ret := _m.ctrl.Call(_m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Trailer() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Trailer") +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/server/server.go b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go index 5932722bfd..38073cb5fe 100644 --- a/vendor/google.golang.org/grpc/examples/route_guide/server/server.go +++ 
b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go @@ -34,7 +34,7 @@ // Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // -// It implements the route guide service whose definition can be found in proto/route_guide.proto. +// It implements the route guide service whose definition can be found in routeguide/route_guide.proto. package main import ( diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go new file mode 100644 index 0000000000..eb4020334b --- /dev/null +++ b/vendor/google.golang.org/grpc/go16.go @@ -0,0 +1,113 @@ +// +build go1.6,!go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "fmt" + "io" + "net" + "net/http" + "os" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req.Cancel = ctx.Done() + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. 
+func toRPCErr(err error) error { + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Internal, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case ErrClientConnClosing: + return status.Error(codes.FailedPrecondition, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go new file mode 100644 index 0000000000..b9eb781d13 --- /dev/null +++ b/vendor/google.golang.org/grpc/go17.go @@ -0,0 +1,113 @@ +// +build go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "context" + "io" + "net" + "net/http" + "os" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" + + netctx "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. 
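go16.go and go17.go above exist only to pick the right cancellation mechanism per Go release: Dialer.Cancel on 1.6, DialContext from 1.7 onward, plus the shared toRPCErr/convertCode mapping. A small standalone sketch of the 1.7+ path; the address is a placeholder:

    package main

    import (
        "context"
        "log"
        "net"
        "time"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        // On Go 1.7+ the context both bounds and cancels the dial; the go1.6
        // variant gets the same effect from net.Dialer{Cancel: ctx.Done()}.
        conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", "example.com:80") // placeholder address
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
        log.Println("connected to", conn.RemoteAddr())
    }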
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return err + } + return nil +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Internal, e.Desc) + default: + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + case ErrClientConnClosing: + return status.Error(codes.FailedPrecondition, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled, netctx.Canceled: + return codes.Canceled + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go similarity index 53% rename from vendor/google.golang.org/grpc/grpclb/grpclb.go rename to vendor/google.golang.org/grpc/grpclb.go index d9a1a8b6fb..8638fc92a1 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -31,19 +31,17 @@ * */ -// Package grpclb implements the load balancing protocol defined at -// https://github.com/grpc/grpc/blob/master/doc/load-balancing.md. -// The implementation is currently EXPERIMENTAL. -package grpclb +package grpc import ( "errors" "fmt" + "math/rand" + "net" "sync" "time" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" "google.golang.org/grpc/grpclog" @@ -51,6 +49,43 @@ import ( "google.golang.org/grpc/naming" ) +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. +type loadBalancerClient struct { + cc *ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) { + desc := &StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) 
+ if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // AddressType indicates the address type returned by name resolution. type AddressType uint8 @@ -61,18 +96,18 @@ const ( GRPCLB ) -// Metadata contains the information the name resolution for grpclb should provide. The -// name resolver used by grpclb balancer is required to provide this type of metadata in +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The +// name resolver used by the grpclb balancer is required to provide this type of metadata in // its address updates. -type Metadata struct { +type AddrMetadataGRPCLB struct { // AddrType is the type of server (grpc load balancer or backend). AddrType AddressType // ServerName is the name of the grpc load balancer. Used for authentication. ServerName string } -// Balancer creates a grpclb load balancer. -func Balancer(r naming.Resolver) grpc.Balancer { +// NewGRPCLBBalancer creates a grpclb load balancer. +func NewGRPCLBBalancer(r naming.Resolver) Balancer { return &balancer{ r: r, } @@ -84,42 +119,46 @@ type remoteBalancerInfo struct { name string } -// addrInfo consists of the information of a backend server. -type addrInfo struct { - addr grpc.Address +// grpclbAddrInfo consists of the information of a backend server. +type grpclbAddrInfo struct { + addr Address connected bool - // dropRequest indicates whether a particular RPC which chooses this address - // should be dropped. - dropRequest bool + // dropForRateLimiting indicates whether this particular request should be + // dropped by the client for rate limiting. + dropForRateLimiting bool + // dropForLoadBalancing indicates whether this particular request should be + // dropped by the client for load balancing. + dropForLoadBalancing bool } type balancer struct { r naming.Resolver + target string mu sync.Mutex seq int // a sequence number to make sure addrCh does not get stale addresses. w naming.Watcher - addrCh chan []grpc.Address + addrCh chan []Address rbs []remoteBalancerInfo - addrs []*addrInfo + addrs []*grpclbAddrInfo next int waitCh chan struct{} done bool expTimer *time.Timer + rand *rand.Rand + + clientStats lbpb.ClientStats } -func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan remoteBalancerInfo) error { +func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { updates, err := w.Next() if err != nil { + grpclog.Printf("grpclb: failed to get next addr update from watcher: %v", err) return err } b.mu.Lock() defer b.mu.Unlock() if b.done { - return grpc.ErrClientConnClosing - } - var bAddr remoteBalancerInfo - if len(b.rbs) > 0 { - bAddr = b.rbs[0] + return ErrClientConnClosing } for _, update := range updates { switch update.Op { @@ -135,7 +174,7 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan remoteBalancerInfo if exist { continue } - md, ok := update.Metadata.(*Metadata) + md, ok := update.Metadata.(*AddrMetadataGRPCLB) if !ok { // TODO: Revisit the handling here and may introduce some fallback mechanism. 
grpclog.Printf("The name resolution contains unexpected metadata %v", update.Metadata) @@ -169,16 +208,11 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan remoteBalancerInfo } // TODO: Fall back to the basic round-robin load balancing if the resulting address is // not a load balancer. - if len(b.rbs) > 0 { - // For simplicity, always use the first one now. May revisit this decision later. - if b.rbs[0] != bAddr { - select { - case <-ch: - default: - } - ch <- b.rbs[0] - } + select { + case <-ch: + default: } + ch <- b.rbs return nil } @@ -211,18 +245,19 @@ func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { servers := l.GetServers() expiration := convertDuration(l.GetExpirationInterval()) var ( - sl []*addrInfo - addrs []grpc.Address + sl []*grpclbAddrInfo + addrs []Address ) for _, s := range servers { md := metadata.Pairs("lb-token", s.LoadBalanceToken) - addr := grpc.Address{ - Addr: fmt.Sprintf("%s:%d", s.IpAddress, s.Port), + addr := Address{ + Addr: fmt.Sprintf("%s:%d", net.IP(s.IpAddress), s.Port), Metadata: &md, } - sl = append(sl, &addrInfo{ - addr: addr, - dropRequest: s.DropRequest, + sl = append(sl, &grpclbAddrInfo{ + addr: addr, + dropForRateLimiting: s.DropForRateLimiting, + dropForLoadBalancing: s.DropForLoadBalancing, }) addrs = append(addrs, addr) } @@ -249,12 +284,41 @@ func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { return } -func (b *balancer) callRemoteBalancer(lbc lbpb.LoadBalancerClient, seq int) (retry bool) { +func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + case <-done: + return + } + b.mu.Lock() + stats := b.clientStats + b.clientStats = lbpb.ClientStats{} // Clear the stats. + b.mu.Unlock() + t := time.Now() + stats.Timestamp = &lbpb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := s.Send(&lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + ClientStats: &stats, + }, + }); err != nil { + grpclog.Printf("grpclb: failed to send load report: %v", err) + return + } + } +} + +func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - stream, err := lbc.BalanceLoad(ctx, grpc.FailFast(false)) + stream, err := lbc.BalanceLoad(ctx) if err != nil { - grpclog.Printf("Failed to perform RPC to the remote balancer %v", err) + grpclog.Printf("grpclb: failed to perform RPC to the remote balancer %v", err) return } b.mu.Lock() @@ -265,21 +329,25 @@ func (b *balancer) callRemoteBalancer(lbc lbpb.LoadBalancerClient, seq int) (ret b.mu.Unlock() initReq := &lbpb.LoadBalanceRequest{ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: new(lbpb.InitialLoadBalanceRequest), + InitialRequest: &lbpb.InitialLoadBalanceRequest{ + Name: b.target, + }, }, } if err := stream.Send(initReq); err != nil { + grpclog.Printf("grpclb: failed to send init request: %v", err) // TODO: backoff on retry? return true } reply, err := stream.Recv() if err != nil { + grpclog.Printf("grpclb: failed to recv init response: %v", err) // TODO: backoff on retry? 
return true } initResp := reply.GetInitialResponse() if initResp == nil { - grpclog.Println("Failed to receive the initial response from the remote balancer.") + grpclog.Println("grpclb: reply from remote balancer did not include initial response.") return } // TODO: Support delegation. @@ -288,10 +356,19 @@ func (b *balancer) callRemoteBalancer(lbc lbpb.LoadBalancerClient, seq int) (ret grpclog.Println("TODO: Delegation is not supported yet.") return } + streamDone := make(chan struct{}) + defer close(streamDone) + b.mu.Lock() + b.clientStats = lbpb.ClientStats{} // Clear client stats. + b.mu.Unlock() + if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { + go b.sendLoadReport(stream, d, streamDone) + } // Retrieve the server list. for { reply, err := stream.Recv() if err != nil { + grpclog.Printf("grpclb: failed to recv server list: %v", err) break } b.mu.Lock() @@ -309,85 +386,163 @@ func (b *balancer) callRemoteBalancer(lbc lbpb.LoadBalancerClient, seq int) (ret return true } -func (b *balancer) Start(target string, config grpc.BalancerConfig) error { +func (b *balancer) Start(target string, config BalancerConfig) error { + b.rand = rand.New(rand.NewSource(time.Now().Unix())) // TODO: Fall back to the basic direct connection if there is no name resolver. if b.r == nil { return errors.New("there is no name resolver installed") } + b.target = target b.mu.Lock() if b.done { b.mu.Unlock() - return grpc.ErrClientConnClosing + return ErrClientConnClosing } - b.addrCh = make(chan []grpc.Address) + b.addrCh = make(chan []Address) w, err := b.r.Resolve(target) if err != nil { b.mu.Unlock() + grpclog.Printf("grpclb: failed to resolve address: %v, err: %v", target, err) return err } b.w = w b.mu.Unlock() - balancerAddrCh := make(chan remoteBalancerInfo, 1) + balancerAddrsCh := make(chan []remoteBalancerInfo, 1) // Spawn a goroutine to monitor the name resolution of remote load balancer. go func() { for { - if err := b.watchAddrUpdates(w, balancerAddrCh); err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) - close(balancerAddrCh) + if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil { + grpclog.Printf("grpclb: the naming watcher stops working due to %v.\n", err) + close(balancerAddrsCh) return } } }() // Spawn a goroutine to talk to the remote load balancer. go func() { - var cc *grpc.ClientConn - for { - rb, ok := <-balancerAddrCh + var ( + cc *ClientConn + // ccError is closed when there is an error in the current cc. + // A new rb should be picked from rbs and connected. + ccError chan struct{} + rb *remoteBalancerInfo + rbs []remoteBalancerInfo + rbIdx int + ) + + defer func() { + if ccError != nil { + select { + case <-ccError: + default: + close(ccError) + } + } if cc != nil { cc.Close() } - if !ok { - // b is closing. - return + }() + + for { + var ok bool + select { + case rbs, ok = <-balancerAddrsCh: + if !ok { + return + } + foundIdx := -1 + if rb != nil { + for i, trb := range rbs { + if trb == *rb { + foundIdx = i + break + } + } + } + if foundIdx >= 0 { + if foundIdx >= 1 { + // Move the address in use to the beginning of the list. + b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0] + rbIdx = 0 + } + continue // If found, don't dial new cc. + } else if len(rbs) > 0 { + // Pick a random one from the list, instead of always using the first one. 
+ if l := len(rbs); l > 1 && rb != nil { + tmpIdx := b.rand.Intn(l - 1) + b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0] + } + rbIdx = 0 + rb = &rbs[0] + } else { + // foundIdx < 0 && len(rbs) <= 0. + rb = nil + } + case <-ccError: + ccError = nil + if rbIdx < len(rbs)-1 { + rbIdx++ + rb = &rbs[rbIdx] + } else { + rb = nil + } + } + + if rb == nil { + continue + } + + if cc != nil { + cc.Close() } // Talk to the remote load balancer to get the server list. - var err error - creds := config.DialCreds - if creds == nil { - cc, err = grpc.Dial(rb.addr, grpc.WithInsecure()) - } else { + var ( + err error + dopts []DialOption + ) + if creds := config.DialCreds; creds != nil { if rb.name != "" { if err := creds.OverrideServerName(rb.name); err != nil { - grpclog.Printf("Failed to override the server name in the credentials: %v", err) + grpclog.Printf("grpclb: failed to override the server name in the credentials: %v", err) continue } } - cc, err = grpc.Dial(rb.addr, grpc.WithTransportCredentials(creds)) + dopts = append(dopts, WithTransportCredentials(creds)) + } else { + dopts = append(dopts, WithInsecure()) + } + if dialer := config.Dialer; dialer != nil { + // WithDialer takes a different type of function, so we instead use a special DialOption here. + dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) } + ccError = make(chan struct{}) + cc, err = Dial(rb.addr, dopts...) if err != nil { - grpclog.Printf("Failed to setup a connection to the remote balancer %v: %v", rb.addr, err) - return + grpclog.Printf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err) + close(ccError) + continue } b.mu.Lock() b.seq++ // tick when getting a new balancer address seq := b.seq b.next = 0 b.mu.Unlock() - go func(cc *grpc.ClientConn) { - lbc := lbpb.NewLoadBalancerClient(cc) - for { - if retry := b.callRemoteBalancer(lbc, seq); !retry { - cc.Close() - return - } + go func(cc *ClientConn, ccError chan struct{}) { + lbc := &loadBalancerClient{cc} + b.callRemoteBalancer(lbc, seq) + cc.Close() + select { + case <-ccError: + default: + close(ccError) } - }(cc) + }(cc, ccError) } }() return nil } -func (b *balancer) down(addr grpc.Address, err error) { +func (b *balancer) down(addr Address, err error) { b.mu.Lock() defer b.mu.Unlock() for _, a := range b.addrs { @@ -398,7 +553,7 @@ func (b *balancer) down(addr grpc.Address, err error) { } } -func (b *balancer) Up(addr grpc.Address) func(error) { +func (b *balancer) Up(addr Address) func(error) { b.mu.Lock() defer b.mu.Unlock() if b.done { @@ -412,7 +567,7 @@ func (b *balancer) Up(addr grpc.Address) func(error) { } a.connected = true } - if a.connected && !a.dropRequest { + if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing { cnt++ } } @@ -426,15 +581,40 @@ func (b *balancer) Up(addr grpc.Address) func(error) { } } -func (b *balancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr grpc.Address, put func(), err error) { +func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { var ch chan struct{} b.mu.Lock() if b.done { b.mu.Unlock() - err = grpc.ErrClientConnClosing + err = ErrClientConnClosing return } + seq := b.seq + defer func() { + if err != nil { + return + } + put = func() { + s, ok := rpcInfoFromContext(ctx) + if !ok { + return + } + b.mu.Lock() + defer b.mu.Unlock() + if b.done || seq < b.seq { + return + } + b.clientStats.NumCallsFinished++ + if !s.bytesSent { + b.clientStats.NumCallsFinishedWithClientFailedToSend++ + } 
else if s.bytesReceived { + b.clientStats.NumCallsFinishedKnownReceived++ + } + } + }() + + b.clientStats.NumCallsStarted++ if len(b.addrs) > 0 { if b.next >= len(b.addrs) { b.next = 0 @@ -444,7 +624,7 @@ func (b *balancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr a := b.addrs[next] next = (next + 1) % len(b.addrs) if a.connected { - if !a.dropRequest { + if !a.dropForRateLimiting && !a.dropForLoadBalancing { addr = a.addr b.next = next b.mu.Unlock() @@ -452,8 +632,15 @@ func (b *balancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr } if !opts.BlockingWait { b.next = next + if a.dropForLoadBalancing { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ + } else if a.dropForRateLimiting { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForRateLimiting++ + } b.mu.Unlock() - err = grpc.Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr) + err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr) return } } @@ -465,8 +652,10 @@ func (b *balancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr } if !opts.BlockingWait { if len(b.addrs) == 0 { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ b.mu.Unlock() - err = grpc.Errorf(codes.Unavailable, "there is no address available") + err = Errorf(codes.Unavailable, "there is no address available") return } // Returns the next addr on b.addrs for a failfast RPC. @@ -486,13 +675,19 @@ func (b *balancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr for { select { case <-ctx.Done(): + b.mu.Lock() + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ + b.mu.Unlock() err = ctx.Err() return case <-ch: b.mu.Lock() if b.done { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ b.mu.Unlock() - err = grpc.ErrClientConnClosing + err = ErrClientConnClosing return } @@ -505,7 +700,7 @@ func (b *balancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr a := b.addrs[next] next = (next + 1) % len(b.addrs) if a.connected { - if !a.dropRequest { + if !a.dropForRateLimiting && !a.dropForLoadBalancing { addr = a.addr b.next = next b.mu.Unlock() @@ -513,8 +708,15 @@ func (b *balancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr } if !opts.BlockingWait { b.next = next + if a.dropForLoadBalancing { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ + } else if a.dropForRateLimiting { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForRateLimiting++ + } b.mu.Unlock() - err = grpc.Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr) + err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr) return } } @@ -536,13 +738,16 @@ func (b *balancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr } } -func (b *balancer) Notify() <-chan []grpc.Address { +func (b *balancer) Notify() <-chan []Address { return b.addrCh } func (b *balancer) Close() error { b.mu.Lock() defer b.mu.Unlock() + if b.done { + return errBalancerClosed + } b.done = true if b.expTimer != nil { b.expTimer.Stop() diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go index 7be89477db..f63941bd80 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go +++ 
b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go @@ -10,6 +10,7 @@ It is generated from these files: It has these top-level messages: Duration + Timestamp LoadBalanceRequest InitialLoadBalanceRequest ClientStats @@ -24,11 +25,6 @@ import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf @@ -58,6 +54,51 @@ func (m *Duration) String() string { return proto.CompactTextString(m func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + type LoadBalanceRequest struct { // Types that are valid to be assigned to LoadBalanceRequestType: // *LoadBalanceRequest_InitialRequest @@ -68,17 +109,17 @@ type LoadBalanceRequest struct { func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } func (*LoadBalanceRequest) ProtoMessage() {} -func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type isLoadBalanceRequest_LoadBalanceRequestType interface { isLoadBalanceRequest_LoadBalanceRequestType() } type LoadBalanceRequest_InitialRequest struct { - InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,oneof"` + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"` } type LoadBalanceRequest_ClientStats struct { - ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,oneof"` + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"` } func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} @@ -180,30 +221,98 @@ func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) { } type InitialLoadBalanceRequest struct { - // Name of load balanced service (IE, service.grpc.gslb.google.com) + // Name of load balanced service (IE, balancer.service.com) + // length should be less than 256 bytes. 
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } func (*InitialLoadBalanceRequest) ProtoMessage() {} -func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *InitialLoadBalanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} // Contains client level statistics that are useful to load balancing. Each -// count should be reset to zero after reporting the stats. +// count except the timestamp should be reset to zero after reporting the stats. type ClientStats struct { - // The total number of requests sent by the client since the last report. - TotalRequests int64 `protobuf:"varint,1,opt,name=total_requests" json:"total_requests,omitempty"` - // The number of client rpc errors since the last report. - ClientRpcErrors int64 `protobuf:"varint,2,opt,name=client_rpc_errors" json:"client_rpc_errors,omitempty"` - // The number of dropped requests since the last report. - DroppedRequests int64 `protobuf:"varint,3,opt,name=dropped_requests" json:"dropped_requests,omitempty"` + // The timestamp of generating the report. + Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` + // The total number of RPCs that started. + NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"` + // The total number of RPCs that were dropped by the client because of rate + // limiting. + NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"` + // The total number of RPCs that were dropped by the client because of load + // balancing. + NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. 
+ NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"` } func (m *ClientStats) Reset() { *m = ClientStats{} } func (m *ClientStats) String() string { return proto.CompactTextString(m) } func (*ClientStats) ProtoMessage() {} -func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ClientStats) GetTimestamp() *Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *ClientStats) GetNumCallsStarted() int64 { + if m != nil { + return m.NumCallsStarted + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinished() int64 { + if m != nil { + return m.NumCallsFinished + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 { + if m != nil { + return m.NumCallsFinishedWithDropForRateLimiting + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 { + if m != nil { + return m.NumCallsFinishedWithDropForLoadBalancing + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if m != nil { + return m.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if m != nil { + return m.NumCallsFinishedKnownReceived + } + return 0 +} type LoadBalanceResponse struct { // Types that are valid to be assigned to LoadBalanceResponseType: @@ -215,17 +324,17 @@ type LoadBalanceResponse struct { func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } func (*LoadBalanceResponse) ProtoMessage() {} -func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } type isLoadBalanceResponse_LoadBalanceResponseType interface { isLoadBalanceResponse_LoadBalanceResponseType() } type LoadBalanceResponse_InitialResponse struct { - InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,oneof"` + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"` } type LoadBalanceResponse_ServerList struct { - ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,oneof"` + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"` } func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} @@ -330,18 +439,26 @@ type InitialLoadBalanceResponse struct { // This is an application layer redirect that indicates the client should use // the specified server for load balancing. When this field is non-empty in // the response, the client should open a separate connection to the - // load_balancer_delegate and call the BalanceLoad method. - LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate" json:"load_balancer_delegate,omitempty"` + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. 
+ LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"` // This interval defines how often the client should send the client stats // to the load balancer. Stats should only be reported when the duration is // positive. - ClientStatsReportInterval *Duration `protobuf:"bytes,3,opt,name=client_stats_report_interval" json:"client_stats_report_interval,omitempty"` + ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"` } func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } func (*InitialLoadBalanceResponse) ProtoMessage() {} -func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { + if m != nil { + return m.LoadBalancerDelegate + } + return "" +} func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration { if m != nil { @@ -360,13 +477,13 @@ type ServerList struct { // list as valid. It may be considered stale after waiting this interval of // time after receiving the list. If the interval is not positive, the // client can assume the list is valid until the next list is received. - ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval" json:"expiration_interval,omitempty"` + ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"` } func (m *ServerList) Reset() { *m = ServerList{} } func (m *ServerList) String() string { return proto.CompactTextString(m) } func (*ServerList) ProtoMessage() {} -func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *ServerList) GetServers() []*Server { if m != nil { @@ -382,176 +499,131 @@ func (m *ServerList) GetExpirationInterval() *Duration { return nil } +// Contains server information. When none of the [drop_for_*] fields are true, +// use the other fields. When drop_for_rate_limiting is true, ignore all other +// fields. Use drop_for_load_balancing only when it is true and +// drop_for_rate_limiting is false. type Server struct { // A resolved address for the server, serialized in network-byte-order. It may // either be an IPv4 or IPv6 address. - IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,proto3" json:"ip_address,omitempty"` + IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // A resolved port number for the server. Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` // An opaque but printable token given to the frontend for each pick. All // frontend requests for that pick must include the token in its initial // metadata. The token is used by the backend to verify the request and to // allow the backend to report load to the gRPC LB system. - LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token" json:"load_balance_token,omitempty"` + // + // Its length is variable but less than 50 bytes. 
+ LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"` // Indicates whether this particular request should be dropped by the client - // when this server is chosen from the list. - DropRequest bool `protobuf:"varint,4,opt,name=drop_request" json:"drop_request,omitempty"` + // for rate limiting. + DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"` + // Indicates whether this particular request should be dropped by the client + // for load balancing. + DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"` } func (m *Server) Reset() { *m = Server{} } func (m *Server) String() string { return proto.CompactTextString(m) } func (*Server) ProtoMessage() {} -func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } -func init() { - proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration") - proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") - proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") - proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") - proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") - proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") - proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") - proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for LoadBalancer service - -type LoadBalancerClient interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) -} - -type loadBalancerClient struct { - cc *grpc.ClientConn -} - -func NewLoadBalancerClient(cc *grpc.ClientConn) LoadBalancerClient { - return &loadBalancerClient{cc} -} - -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := grpc.NewClientStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) 
- if err != nil { - return nil, err +func (m *Server) GetIpAddress() []byte { + if m != nil { + return m.IpAddress } - x := &loadBalancerBalanceLoadClient{stream} - return x, nil -} - -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) + return nil } -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (m *Server) GetPort() int32 { + if m != nil { + return m.Port } - return m, nil -} - -// Server API for LoadBalancer service - -type LoadBalancerServer interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error -} - -func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { - s.RegisterService(&_LoadBalancer_serviceDesc, srv) -} - -func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream + return 0 } -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream +func (m *Server) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" } -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) +func (m *Server) GetDropForRateLimiting() bool { + if m != nil { + return m.DropForRateLimiting + } + return false } -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err +func (m *Server) GetDropForLoadBalancing() bool { + if m != nil { + return m.DropForLoadBalancing } - return m, nil + return false } -var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.lb.v1.LoadBalancer", - HandlerType: (*LoadBalancerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "BalanceLoad", - Handler: _LoadBalancer_BalanceLoad_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpclb.proto", +func init() { + proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration") + proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp") + proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") + proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") + proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") + proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") + proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") + proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") + proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") } func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 471 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6f, 0xd3, 0x3e, - 0x14, 0xc5, 0x9b, 0x7f, 0xb7, 0xfd, 0xb7, 0x9b, 0xc0, 0xc6, 0xdd, 0x54, 
0xda, 0x32, 0x8d, 0x2a, - 0x08, 0x54, 0x90, 0x08, 0x2c, 0xbc, 0xf1, 0x84, 0x0a, 0x0f, 0x45, 0xda, 0xd3, 0xf6, 0x86, 0x90, - 0x2c, 0x27, 0xb9, 0x9a, 0x2c, 0x82, 0x6d, 0x6c, 0xaf, 0x1a, 0xdf, 0x07, 0xf1, 0x39, 0x91, 0xe3, - 0x94, 0x64, 0x54, 0x15, 0xbc, 0xc5, 0xbe, 0x3e, 0xf7, 0x1e, 0xff, 0x7c, 0x02, 0xc9, 0xb5, 0xd1, - 0x65, 0x5d, 0x64, 0xda, 0x28, 0xa7, 0x10, 0xfc, 0x2a, 0xab, 0x8b, 0x6c, 0x75, 0x9e, 0xbe, 0x80, - 0xfd, 0x0f, 0x37, 0x86, 0x3b, 0xa1, 0x24, 0x1e, 0xc2, 0xff, 0x96, 0x4a, 0x25, 0x2b, 0x3b, 0x8e, - 0x66, 0xd1, 0x7c, 0x88, 0xf7, 0x60, 0x57, 0x72, 0xa9, 0xec, 0xf8, 0xbf, 0x59, 0x34, 0xdf, 0x4d, - 0x7f, 0x44, 0x80, 0x17, 0x8a, 0x57, 0x0b, 0x5e, 0x73, 0x59, 0xd2, 0x25, 0x7d, 0xbb, 0x21, 0xeb, - 0xf0, 0x1d, 0x1c, 0x0a, 0x29, 0x9c, 0xe0, 0x35, 0x33, 0x61, 0xab, 0x91, 0xc7, 0xf9, 0xd3, 0xac, - 0x1b, 0x94, 0x7d, 0x0c, 0x47, 0x36, 0xf5, 0xcb, 0x01, 0xbe, 0x82, 0xa4, 0xac, 0x05, 0x49, 0xc7, - 0xac, 0xe3, 0x2e, 0x8c, 0x8b, 0xf3, 0x87, 0x7d, 0xf9, 0xfb, 0xa6, 0x7e, 0xe5, 0xcb, 0xcb, 0xc1, - 0xe2, 0x11, 0x4c, 0x6a, 0xc5, 0x2b, 0x56, 0x84, 0x4e, 0xeb, 0xb9, 0xcc, 0x7d, 0xd7, 0x94, 0x3e, - 0x87, 0xc9, 0xd6, 0x61, 0x98, 0xc0, 0x8e, 0xe4, 0x5f, 0xa9, 0x71, 0x78, 0x90, 0x7e, 0x82, 0xb8, - 0xd7, 0x18, 0x47, 0x70, 0xdf, 0x29, 0xd7, 0xdd, 0x63, 0xcd, 0x61, 0x02, 0x0f, 0x5a, 0x7f, 0x46, - 0x97, 0x8c, 0x8c, 0x51, 0x26, 0x98, 0x1c, 0xe2, 0x18, 0x8e, 0x2a, 0xa3, 0xb4, 0xa6, 0xaa, 0x13, - 0x0d, 0x7d, 0x25, 0xfd, 0x19, 0xc1, 0xf1, 0x1d, 0x03, 0x56, 0x2b, 0x69, 0x09, 0x17, 0x70, 0xd4, - 0xe1, 0x0a, 0x7b, 0x2d, 0xaf, 0x67, 0x7f, 0xe3, 0x15, 0x4e, 0x2f, 0x07, 0xf8, 0x12, 0x62, 0x4b, - 0x66, 0x45, 0x86, 0xd5, 0xc2, 0xba, 0x96, 0xd7, 0xa8, 0x2f, 0xbf, 0x6a, 0xca, 0x17, 0xc2, 0xf3, - 0x5d, 0x9c, 0xc2, 0xf4, 0x0f, 0x5c, 0xa1, 0x53, 0xe0, 0x75, 0x0b, 0xd3, 0xed, 0xc3, 0xf0, 0x0c, - 0x46, 0x7d, 0xad, 0x61, 0x15, 0xd5, 0x74, 0xcd, 0x5d, 0x8b, 0x10, 0xdf, 0xc2, 0x69, 0xff, 0xed, - 0x98, 0x21, 0xad, 0x8c, 0x63, 0x42, 0x3a, 0x32, 0x2b, 0x5e, 0x37, 0x30, 0xe2, 0xfc, 0xa4, 0xef, - 0x6d, 0x1d, 0xb8, 0xb4, 0x02, 0xe8, 0x7c, 0xe2, 0x13, 0x1f, 0x3f, 0xbf, 0xf2, 0xd8, 0x87, 0xf3, - 0x38, 0xc7, 0xcd, 0x0b, 0xe1, 0x39, 0x1c, 0xd3, 0xad, 0x16, 0xa1, 0xc1, 0xbf, 0x4d, 0xf9, 0x0c, - 0x7b, 0xad, 0x18, 0x01, 0x84, 0x66, 0xbc, 0xaa, 0x0c, 0xd9, 0xf0, 0xb6, 0x89, 0x0f, 0x84, 0x37, - 0x1c, 0x22, 0x8e, 0x53, 0xc0, 0x3b, 0xa4, 0x9c, 0xfa, 0x42, 0xb2, 0xe9, 0x7e, 0x80, 0x27, 0x90, - 0xf8, 0xa7, 0xfe, 0x1d, 0xf2, 0x9d, 0x59, 0x34, 0xdf, 0xcf, 0x0b, 0x48, 0x7a, 0xd8, 0x0c, 0x5e, - 0x42, 0xdc, 0x7e, 0xfb, 0x6d, 0x3c, 0xeb, 0x5b, 0xda, 0xcc, 0xe3, 0xf4, 0xf1, 0xd6, 0x7a, 0xe0, - 0x3f, 0x8f, 0x5e, 0x47, 0xc5, 0x5e, 0xf3, 0xdf, 0xbe, 0xf9, 0x15, 0x00, 0x00, 0xff, 0xff, 0x01, - 0x8b, 0xc9, 0x26, 0xc7, 0x03, 0x00, 0x00, + // 733 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39, + 0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34, + 0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a, + 0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9, + 0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1, + 0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92, + 0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51, + 0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0, + 0x74, 
0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51, + 0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7, + 0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4, + 0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13, + 0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67, + 0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed, + 0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93, + 0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f, + 0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2, + 0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4, + 0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd, + 0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2, + 0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd, + 0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71, + 0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a, + 0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c, + 0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c, + 0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0, + 0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84, + 0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37, + 0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5, + 0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f, + 0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07, + 0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71, + 0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f, + 0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87, + 0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94, + 0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56, + 0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9, + 0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a, + 0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e, + 0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87, + 0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28, + 0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70, + 0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94, + 0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5, + 0xc6, 0x7f, 0x13, 0xf7, 0xfd, 
0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff, + 0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go b/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go new file mode 100644 index 0000000000..65ce6360c9 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go @@ -0,0 +1,54 @@ +// This file contains the generated server side code. +// It's only used for grpclb testing. + +package grpclb + +import ( + "google.golang.org/grpc" + lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" +) + +// Server API for LoadBalancer service + +type loadBalancerServer interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(*loadBalancerBalanceLoadServer) error +} + +func registerLoadBalancerServer(s *grpc.Server, srv loadBalancerServer) { + s.RegisterService( + &grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*loadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: balanceLoadHandler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpclb.proto", + }, srv) +} + +func balanceLoadHandler(srv interface{}, stream grpc.ServerStream) error { + return srv.(loadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *lbpb.LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*lbpb.LoadBalanceRequest, error) { + m := new(lbpb.LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 8d932efed7..19893be9d1 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -40,17 +40,17 @@ import ( // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. inovker is the handler to complete the RPC +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC // and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. +// This is an EXPERIMENTAL API. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) // StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. 
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on diff --git a/vendor/google.golang.org/grpc/interop/test_utils.go b/vendor/google.golang.org/grpc/interop/test_utils.go index e4e427c75e..15ec008395 100644 --- a/vendor/google.golang.org/grpc/interop/test_utils.go +++ b/vendor/google.golang.org/grpc/interop/test_utils.go @@ -392,7 +392,7 @@ func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScop } token := GetToken(serviceAccountKeyFile, oauthScope) kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken} - ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) + ctx := metadata.NewOutgoingContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) reply, err := tc.UnaryCall(ctx, req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) @@ -416,7 +416,7 @@ var ( // DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent. func DoCancelAfterBegin(tc testpb.TestServiceClient, args ...grpc.CallOption) { - ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata)) + ctx, cancel := context.WithCancel(metadata.NewOutgoingContext(context.Background(), testMetadata)) stream, err := tc.StreamingInputCall(ctx, args...) if err != nil { grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) @@ -491,7 +491,7 @@ func DoCustomMetadata(tc testpb.TestServiceClient, args ...grpc.CallOption) { ResponseSize: proto.Int32(int32(1)), Payload: pl, } - ctx := metadata.NewContext(context.Background(), customMetadata) + ctx := metadata.NewOutgoingContext(context.Background(), customMetadata) var header, trailer metadata.MD args = append(args, grpc.Header(&header), grpc.Trailer(&trailer)) reply, err := tc.UnaryCall( @@ -627,7 +627,7 @@ func serverNewPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { status := in.GetResponseStatus() - if md, ok := metadata.FromContext(ctx); ok { + if md, ok := metadata.FromIncomingContext(ctx); ok { if initialMetadata, ok := md[initialMetadataKey]; ok { header := metadata.Pairs(initialMetadataKey, initialMetadata[0]) grpc.SendHeader(ctx, header) @@ -686,7 +686,7 @@ func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInput } func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { - if md, ok := metadata.FromContext(stream.Context()); ok { + if md, ok := metadata.FromIncomingContext(stream.Context()); ok { if initialMetadata, ok := md[initialMetadataKey]; ok { header := metadata.Pairs(initialMetadataKey, initialMetadata[0]) stream.SendHeader(header) diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 0000000000..0f855ff46e --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,80 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package keepalive defines configurable parameters for point-to-point healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a connection is broken +// and send pings so intermediaries will be aware of the liveness of the connection. +// Make sure these parameters are set in coordination with the keepalive policy on the server, +// as incompatible settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client runs keepalive checks even with no active RPCs. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway. + // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. + // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. 
+ MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the server-side. +// Server will close connection with a client that violates this policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server expects keepalive pings even when there are no active streams(RPCs). + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 7332395028..06688134d5 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -36,58 +36,27 @@ package metadata // import "google.golang.org/grpc/metadata" import ( - "encoding/base64" "fmt" "strings" "golang.org/x/net/context" ) -const ( - binHdrSuffix = "-bin" -) - -// encodeKeyValue encodes key and value qualified for transmission via gRPC. -// Transmitting binary headers violates HTTP/2 spec. -// TODO(zhaoq): Maybe check if k is ASCII also. -func encodeKeyValue(k, v string) (string, string) { - k = strings.ToLower(k) - if strings.HasSuffix(k, binHdrSuffix) { - val := base64.StdEncoding.EncodeToString([]byte(v)) - v = string(val) - } - return k, v -} - -// DecodeKeyValue returns the original key and value corresponding to the -// encoded data in k, v. -// If k is a binary header and v contains comma, v is split on comma before decoded, -// and the decoded v will be joined with comma before returned. +// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used. func DecodeKeyValue(k, v string) (string, string, error) { - if !strings.HasSuffix(k, binHdrSuffix) { - return k, v, nil - } - vvs := strings.Split(v, ",") - for i, vv := range vvs { - val, err := base64.StdEncoding.DecodeString(vv) - if err != nil { - return "", "", err - } - vvs[i] = string(val) - } - return k, strings.Join(vvs, ","), nil + return k, v, nil } // MD is a mapping from metadata keys to values. Users should use the following // two convenience functions New and Pairs to generate MD. type MD map[string][]string -// New creates a MD from given key-value map. -// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be applied Base64 encoding. +// New creates an MD from a given key-value map. +// Keys are automatically converted to lowercase. func New(m map[string]string) MD { md := MD{} - for k, v := range m { - key, val := encodeKeyValue(k, v) + for k, val := range m { + key := strings.ToLower(k) md[key] = append(md[key], val) } return md @@ -95,20 +64,19 @@ func New(m map[string]string) MD { // Pairs returns an MD formed by the mapping of key, value ... // Pairs panics if len(kv) is odd. -// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be appplied Base64 encoding. +// Keys are automatically converted to lowercase. 
func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } md := MD{} - var k string + var key string for i, s := range kv { if i%2 == 0 { - k = s + key = strings.ToLower(s) continue } - key, val := encodeKeyValue(k, s) - md[key] = append(md[key], val) + md[key] = append(md[key], s) } return md } @@ -123,9 +91,9 @@ func (md MD) Copy() MD { return Join(md) } -// Join joins any number of MDs into a single MD. +// Join joins any number of mds into a single MD. // The order of values for each key is determined by the order in which -// the MDs containing those values are presented to Join. +// the mds containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -136,17 +104,41 @@ func Join(mds ...MD) MD { return out } -type mdKey struct{} +type mdIncomingKey struct{} +type mdOutgoingKey struct{} -// NewContext creates a new context with md attached. +// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated. func NewContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdKey{}, md) + return NewOutgoingContext(ctx, md) } -// FromContext returns the MD in ctx if it exists. -// The returned md should be immutable, writing to it may cause races. -// Modification should be made to the copies of the returned md. +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, md) +} + +// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated. func FromContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdKey{}).(MD) + return FromIncomingContext(ctx) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to the copies of the returned MD. +func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdOutgoingKey{}).(MD) return } diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index bfa6205ba9..fa7de9aff3 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -42,7 +42,8 @@ import ( "google.golang.org/grpc/credentials" ) -// Peer contains the information of the peer for an RPC. +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. type Peer struct { // Addr is the peer address. Addr net.Addr diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 0000000000..10188dc343 --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy.go @@ -0,0 +1,145 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + + "golang.org/x/net/context" +) + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (string, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return "", err + } + if url == nil { + return "", errDisabled + } + return url.Host, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. 
+type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := (&http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: addr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + }) + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// newProxyDialer returns a dialer that connects to proxy first if necessary. +// The returned dialer checks if a proxy is necessary, dial to the proxy with the +// provided dialer, does HTTP CONNECT handshake and returns the connection. +func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (conn net.Conn, err error) { + var skipHandshake bool + newAddr, err := mapAddress(ctx, addr) + if err != nil { + if err != errDisabled { + return nil, err + } + skipHandshake = true + newAddr = addr + } + + conn, err = dialer(ctx, newAddr) + if err != nil { + return + } + if !skipHandshake { + conn, err = doHTTPConnectHandshake(ctx, conn, addr) + } + return + } +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index d98a88376b..96c4be6d6b 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -37,48 +37,22 @@ import ( "bytes" "compress/gzip" "encoding/binary" - "fmt" "io" "io/ioutil" "math" - "os" + "sync" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" "google.golang.org/grpc/transport" ) -// Codec defines the interface gRPC uses to encode and decode messages. -type Codec interface { - // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error - // String returns the name of the Codec implementation. The returned - // string will be used as part of content type in transmission. - String() string -} - -// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. -type protoCodec struct{} - -func (protoCodec) Marshal(v interface{}) ([]byte, error) { - return proto.Marshal(v.(proto.Message)) -} - -func (protoCodec) Unmarshal(data []byte, v interface{}) error { - return proto.Unmarshal(data, v.(proto.Message)) -} - -func (protoCodec) String() string { - return "proto" -} - // Compressor defines the interface gRPC uses to compress a message. type Compressor interface { // Do compresses p into w. 
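An aside on the metadata changes earlier in this patch: the old metadata.NewContext/FromContext helpers become deprecated wrappers, and callers are expected to use the outgoing/incoming variants instead, as the interop test updates at the start of this section show. The sketch below illustrates the new calling convention only; it is not part of the vendored code, and the helper names and the "authorization" key are purely illustrative.

package metadataexample

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/metadata"
)

// attachAuth is the client-side convention: metadata for an outgoing RPC is
// attached with NewOutgoingContext (NewContext now simply forwards to it).
func attachAuth(ctx context.Context, token string) context.Context {
	md := metadata.Pairs("authorization", "Bearer "+token)
	return metadata.NewOutgoingContext(ctx, md)
}

// readAuth is the server-side convention: handlers read the incoming metadata
// with FromIncomingContext. Per the doc comments in metadata.go above, the
// returned MD should be treated as read-only.
func readAuth(ctx context.Context) (string, bool) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok || len(md["authorization"]) == 0 {
		return "", false
	}
	return md["authorization"][0], true
}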
@@ -87,16 +61,24 @@ type Compressor interface { Type() string } -// NewGZIPCompressor creates a Compressor based on GZIP. -func NewGZIPCompressor() Compressor { - return &gzipCompressor{} +type gzipCompressor struct { + pool sync.Pool } -type gzipCompressor struct { +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(ioutil.Discard) + }, + }, + } } func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := gzip.NewWriter(w) + z := c.pool.Get().(*gzip.Writer) + z.Reset(w) if _, err := z.Write(p); err != nil { return err } @@ -116,6 +98,7 @@ type Decompressor interface { } type gzipDecompressor struct { + pool sync.Pool } // NewGZIPDecompressor creates a Decompressor based on GZIP. @@ -124,11 +107,26 @@ func NewGZIPDecompressor() Decompressor { } func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - z, err := gzip.NewReader(r) - if err != nil { - return nil, err + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } } - defer z.Close() + + defer func() { + z.Close() + d.pool.Put(z) + }() return ioutil.ReadAll(z) } @@ -138,11 +136,14 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - peer *peer.Peer - traceInfo traceInfo // in trace.go + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + peer *peer.Peer + traceInfo traceInfo // in trace.go + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials } var defaultCallInfo = callInfo{failFast: true} @@ -159,6 +160,14 @@ type CallOption interface { after(*callInfo) } +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + type beforeCall func(c *callInfo) error func (o beforeCall) before(c *callInfo) error { return o(c) } @@ -189,7 +198,9 @@ func Trailer(md *metadata.MD) CallOption { // unary RPC. func Peer(peer *peer.Peer) CallOption { return afterCall(func(c *callInfo) { - *peer = *c.peer + if c.peer != nil { + *peer = *c.peer + } }) } @@ -198,7 +209,8 @@ func Peer(peer *peer.Peer) CallOption { // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will retry // the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast is default to true. +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// Note: failFast is default to true. func FailFast(failFast bool) CallOption { return beforeCall(func(c *callInfo) error { c.failFast = failFast @@ -206,6 +218,31 @@ func FailFast(failFast bool) CallOption { }) } +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. 
+func MaxCallRecvMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxReceiveMessageSize = &s + return nil + }) +} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxSendMessageSize = &s + return nil + }) +} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return beforeCall(func(c *callInfo) error { + c.creds = creds + return nil + }) +} + // The format of the payload: compressed or not? type payloadFormat uint8 @@ -239,8 +276,8 @@ type parser struct { // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. -func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := io.ReadFull(p.r, p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { return 0, nil, err } @@ -250,13 +287,13 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro if length == 0 { return pf, nil, nil } - if length > uint32(maxMsgSize) { - return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) + if length > uint32(maxReceiveMessageSize) { + return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead // of making it for each message: msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.r, msg); err != nil { + if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -277,7 +314,7 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl // TODO(zhaoq): optimize to reduce memory alloc and copying. 
b, err = c.Marshal(msg) if err != nil { - return nil, err + return nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } if outPayload != nil { outPayload.Payload = msg @@ -287,14 +324,14 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl } if cp != nil { if err := cp.Do(cbuf, b); err != nil { - return nil, err + return nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } b = cbuf.Bytes() } length = uint(len(b)) } if length > math.MaxUint32 { - return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) + return nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", length) } const ( @@ -335,8 +372,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er return nil } -func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error { - pf, d, err := p.recvMsg(maxMsgSize) +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { + pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return err } @@ -352,10 +389,10 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } - if len(d) > maxMsgSize { + if len(d) > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) + return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) } if err := c.Unmarshal(d, m); err != nil { return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) @@ -370,116 +407,57 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ return nil } -// rpcError defines the status from an RPC. -type rpcError struct { - code codes.Code - desc string +type rpcInfo struct { + bytesSent bool + bytesReceived bool } -func (e *rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc) +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{}) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +func updateRPCInfoInContext(ctx context.Context, s rpcInfo) { + if ss, ok := rpcInfoFromContext(ctx); ok { + *ss = s + } + return } // Code returns the error code for err if it was produced by the rpc system. // Otherwise, it returns codes.Unknown. +// +// Deprecated; use status.FromError and Code method instead. func Code(err error) codes.Code { - if err == nil { - return codes.OK - } - if e, ok := err.(*rpcError); ok { - return e.code + if s, ok := status.FromError(err); ok { + return s.Code() } return codes.Unknown } // ErrorDesc returns the error description of err if it was produced by the rpc system. // Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated; use status.FromError and Message method instead. 
func ErrorDesc(err error) string { - if err == nil { - return "" - } - if e, ok := err.(*rpcError); ok { - return e.desc + if s, ok := status.FromError(err); ok { + return s.Message() } return err.Error() } // Errorf returns an error containing an error code and a description; // Errorf returns nil if c is OK. +// +// Deprecated; use status.Errorf instead. func Errorf(c codes.Code, format string, a ...interface{}) error { - if c == codes.OK { - return nil - } - return &rpcError{ - code: c, - desc: fmt.Sprintf(format, a...), - } -} - -// toRPCErr converts an error into a rpcError. -func toRPCErr(err error) error { - switch e := err.(type) { - case *rpcError: - return err - case transport.StreamError: - return &rpcError{ - code: e.Code, - desc: e.Desc, - } - case transport.ConnectionError: - return &rpcError{ - code: codes.Internal, - desc: e.Desc, - } - default: - switch err { - case context.DeadlineExceeded: - return &rpcError{ - code: codes.DeadlineExceeded, - desc: err.Error(), - } - case context.Canceled: - return &rpcError{ - code: codes.Canceled, - desc: err.Error(), - } - case ErrClientConnClosing: - return &rpcError{ - code: codes.FailedPrecondition, - desc: err.Error(), - } - } - - } - return Errorf(codes.Unknown, "%v", err) -} - -// convertCode converts a standard Go error into its canonical code. Note that -// this is only used to translate the error returned by the server applications. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled: - return codes.Canceled - case context.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown + return status.Errorf(c, format, a...) } // MethodConfig defines the configuration recommended by the service providers for a @@ -489,24 +467,22 @@ type MethodConfig struct { // WaitForReady indicates whether RPCs sent to this method should wait until // the connection is ready by default (!failfast). The value specified via the // gRPC client API will override the value set here. - WaitForReady bool + WaitForReady *bool // Timeout is the default timeout for RPCs sent to this method. The actual // deadline used will be the minimum of the value specified here and the value // set by the application via the gRPC client API. If either one is not set, // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout time.Duration + Timeout *time.Duration // MaxReqSize is the maximum allowed payload size for an individual request in a // stream (client->server) in bytes. The size which is measured is the serialized // payload after per-message compression (but before stream compression) in bytes. // The actual value used is the minumum of the value specified here and the value set // by the application via the gRPC client API. If either one is not set, then the other // will be used. If neither is set, then the built-in default is used. - // TODO: support this. - MaxReqSize uint32 + MaxReqSize *int // MaxRespSize is the maximum allowed payload size for an individual response in a // stream (server->client) in bytes. - // TODO: support this. 
- MaxRespSize uint32 + MaxRespSize *int } // ServiceConfig is provided by the service provider and contains parameters for how @@ -517,9 +493,32 @@ type ServiceConfig struct { // via grpc.WithBalancer will override this. LB Balancer // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. Methods map[string]MethodConfig } +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + // SupportPackageIsVersion4 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the grpc package. // @@ -527,3 +526,8 @@ type ServiceConfig struct { // requires a synchronised update of grpc-go and protoc-gen-go. This constant // should not be referenced from any other code. const SupportPackageIsVersion4 = true + +// Version is the current grpc version. +const Version = "1.5.0-dev" + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 985226d609..9e9a3442ef 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -53,12 +53,19 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" "google.golang.org/grpc/tap" "google.golang.org/grpc/transport" ) +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = 1024 * 1024 * 4 +) + type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. @@ -105,24 +112,63 @@ type Server struct { } type options struct { - creds credentials.TransportCredentials - codec Codec - cp Compressor - dc Decompressor - maxMsgSize int - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - inTapHandle tap.ServerInHandle - statsHandler stats.Handler - maxConcurrentStreams uint32 - useHandlerImpl bool // use http.Handler-based server + creds credentials.TransportCredentials + codec Codec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 } -var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit +var defaultServerOptions = options{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, +} -// A ServerOption sets options. 
+// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption func(*options) +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialWindowSize = s + } +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialConnWindowSize = s + } +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + return func(o *options) { + o.keepaliveParams = kp + } +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return func(o *options) { + o.keepalivePolicy = kep + } +} + // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. func CustomCodec(codec Codec) ServerOption { return func(o *options) { @@ -144,11 +190,25 @@ func RPCDecompressor(dc Decompressor) ServerOption { } } -// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. -// If this is not set, gRPC uses the default 4MB. +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return func(o *options) { + o.maxReceiveMessageSize = m + } +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default 4MB. 
+func MaxSendMsgSize(m int) ServerOption { return func(o *options) { - o.maxMsgSize = m + o.maxSendMessageSize = m } } @@ -173,7 +233,7 @@ func Creds(c credentials.TransportCredentials) ServerOption { func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { return func(o *options) { if o.unaryInt != nil { - panic("The unary server interceptor has been set.") + panic("The unary server interceptor was already set and may not be reset.") } o.unaryInt = i } @@ -184,7 +244,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { func StreamInterceptor(i StreamServerInterceptor) ServerOption { return func(o *options) { if o.streamInt != nil { - panic("The stream server interceptor has been set.") + panic("The stream server interceptor was already set and may not be reset.") } o.streamInt = i } @@ -195,7 +255,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { func InTapHandle(h tap.ServerInHandle) ServerOption { return func(o *options) { if o.inTapHandle != nil { - panic("The tap handle has been set.") + panic("The tap handle was already set and may not be reset.") } o.inTapHandle = h } @@ -208,11 +268,28 @@ func StatsHandler(h stats.Handler) ServerOption { } } +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function has full access to the Context of the request and the +// stream, and the invocation passes through interceptors. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return func(o *options) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { - var opts options - opts.maxMsgSize = defaultMaxMsgSize + opts := defaultServerOptions for _, o := range opt { o(&opts) } @@ -251,8 +328,8 @@ func (s *Server) errorf(format string, a ...interface{}) { } } -// RegisterService register a service and its implementation to the gRPC -// server. Called from the IDL generated code. This must be called before +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before // invoking Serve. func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ht := reflect.TypeOf(sd.HandlerType).Elem() @@ -297,7 +374,7 @@ type MethodInfo struct { IsServerStream bool } -// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. 
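The server.go hunks around this point add several new ServerOptions: receive/send message-size limits that supersede MaxMsgSize, keepalive parameters and an enforcement policy, and an unknown-service handler. A minimal sketch of combining them when constructing a server follows; it is illustrative only, the numeric limits and keepalive values are arbitrary examples, and the catch-all handler body is hypothetical.

package serverexample

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/status"
)

func newExampleServer() *grpc.Server {
	return grpc.NewServer(
		// Raise the 4MB defaults introduced above (MaxMsgSize is now a
		// deprecated alias for MaxRecvMsgSize).
		grpc.MaxRecvMsgSize(16*1024*1024),
		grpc.MaxSendMsgSize(16*1024*1024),
		// Server-side keepalive, using the new keepalive package.
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 15 * time.Minute,
			Time:              2 * time.Hour,
			Timeout:           20 * time.Second,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime: 5 * time.Minute,
		}),
		// Invoked for unregistered services/methods instead of the default
		// Unimplemented error; it receives the raw server stream.
		grpc.UnknownServiceHandler(func(srv interface{}, stream grpc.ServerStream) error {
			return status.Error(codes.Unimplemented, "no handler registered for this method")
		}),
	)
}

Client-side, the matching per-call limits are the MaxCallRecvMsgSize and MaxCallSendMsgSize CallOptions added in the rpc_util.go hunks above.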
@@ -390,10 +467,12 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("Accept error: %v; retrying in %v", err, tempDelay) s.mu.Unlock() + timer := time.NewTimer(tempDelay) select { - case <-time.After(tempDelay): + case <-timer.C: case <-s.ctx.Done(): } + timer.Stop() continue } s.mu.Lock() @@ -446,10 +525,14 @@ func (s *Server) handleRawConn(rawConn net.Conn) { // transport.NewServerTransport). func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { config := &transport.ServerConfig{ - MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, - InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -581,14 +664,11 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str } p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) if err != nil { - // This typically indicates a fatal issue (e.g., memory - // corruption or hardware faults) the application program - // cannot handle. - // - // TODO(zhaoq): There exist other options also such as only closing the - // faulty stream locally and remotely (Other streams can keep going). Find - // the optimal option. - grpclog.Fatalf("grpc: Server failed to encode response %v", err) + grpclog.Println("grpc: server failed to encode response: ", err) + return err + } + if len(p) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(p), s.opts.maxSendMessageSize) } err = t.Write(stream, p, opts) if err == nil && outPayload != nil { @@ -605,9 +685,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ EndTime: time.Now(), } @@ -615,8 +693,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - } - }() + }() + } if trInfo != nil { defer trInfo.tr.Finish() trInfo.firstLine.client = false @@ -633,136 +711,137 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. stream.SetSendCompress(s.opts.cp.Type()) } p := &parser{r: stream} - for { - pf, req, err := p.recvMsg(s.opts.maxMsgSize) - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - if err != nil { - switch err := err.(type) { - case *rpcError: - if e := t.WriteStatus(stream, err.code, err.desc); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). 
+ return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } else { + switch st := err.(type) { case transport.ConnectionError: // Nothing to do here. case transport.StreamError: - if e := t.WriteStatus(stream, err.Code, err.Desc); e != nil { + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) } default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) } - return err } + return err + } - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - switch err := err.(type) { - case *rpcError: - if e := t.WriteStatus(stream, err.code, err.desc); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - return err - default: - if e := t.WriteStatus(stream, codes.Internal, err.Error()); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) } + return err } - var inPayload *stats.InPayload - if sh != nil { - inPayload = &stats.InPayload{ - RecvTime: time.Now(), - } + if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) } - statusCode := codes.OK - statusDesc := "" - df := func(v interface{}) error { - if inPayload != nil { - inPayload.WireLength = len(req) - } - if pf == compressionMade { - var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - return Errorf(codes.Internal, err.Error()) - } - } - if len(req) > s.opts.maxMsgSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. - statusCode = codes.Internal - statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) - } - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return err - } - if inPayload != nil { - inPayload.Payload = v - inPayload.Data = req - inPayload.Length = len(req) - sh.HandleRPC(stream.Context(), inPayload) - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil + + // TODO checkRecvPayload always return RPC error. Add a return here if necessary. 
+ } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), } - reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) - if appErr != nil { - if err, ok := appErr.(*rpcError); ok { - statusCode = err.code - statusDesc = err.desc - } else { - statusCode = convertCode(appErr) - statusDesc = appErr.Error() - } - if trInfo != nil && statusCode != codes.OK { - trInfo.tr.LazyLog(stringer(statusDesc), true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + } + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + return Errorf(codes.Internal, err.Error()) } - return Errorf(statusCode, statusDesc) + } + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) } if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) } - opts := &transport.Options{ - Last: true, - Delay: false, + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(convertCode(appErr), appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { - switch err := err.(type) { + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { case transport.ConnectionError: // Nothing to do here. 
case transport.StreamError: - statusCode = err.Code - statusDesc = err.Desc + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + } default: - statusCode = codes.Unknown - statusDesc = err.Error() + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } - return err } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - errWrite := t.WriteStatus(stream, statusCode, statusDesc) - if statusCode != codes.OK { - return Errorf(statusCode, statusDesc) - } - return errWrite + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + return t.WriteStatus(stream, status.New(codes.OK, "")) } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { @@ -772,9 +851,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ EndTime: time.Now(), } @@ -782,21 +859,22 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - } - }() + }() + } if s.opts.cp != nil { stream.SetSendCompress(s.opts.cp.Type()) } ss := &serverStream{ - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - maxMsgSize: s.opts.maxMsgSize, - trInfo: trInfo, - statsHandler: sh, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, } if ss.cp != nil { ss.cbuf = new(bytes.Buffer) @@ -815,43 +893,47 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } var appErr error + var server interface{} + if srv != nil { + server = srv.server + } if s.opts.streamInt == nil { - appErr = sd.Handler(srv.server, ss) + appErr = sd.Handler(server, ss) } else { info := &StreamServerInfo{ FullMethod: stream.Method(), IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) + appErr = s.opts.streamInt(server, ss, info, sd.Handler) } if appErr != nil { - if err, ok := appErr.(*rpcError); ok { - ss.statusCode = err.code - ss.statusDesc = err.desc - } else if err, ok := appErr.(transport.StreamError); ok { - ss.statusCode = err.Code - ss.statusDesc = err.Desc - } else { - ss.statusCode = convertCode(appErr) - ss.statusDesc = appErr.Error() + appStatus, ok := status.FromError(appErr) + if !ok { + switch err := appErr.(type) { + case transport.StreamError: + appStatus = status.New(err.Code, err.Desc) + default: + appStatus = status.New(convertCode(appErr), appErr.Error()) + } + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() } + t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? 
+ return appErr } if trInfo != nil { ss.mu.Lock() - if ss.statusCode != codes.OK { - ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true) - ss.trInfo.tr.SetError() - } else { - ss.trInfo.tr.LazyLog(stringer("OK"), false) - } + ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - errWrite := t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) - if ss.statusCode != codes.OK { - return Errorf(ss.statusCode, ss.statusDesc) - } - return errWrite + return t.WriteStatus(ss.s, status.New(codes.OK, "")) } @@ -867,7 +949,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, codes.InvalidArgument, errDesc); err != nil { + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() @@ -883,12 +965,16 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str method := sm[pos+1:] srv, ok := s.m[service] if !ok { + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) trInfo.tr.SetError() } errDesc := fmt.Sprintf("unknown service %v", service) - if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() @@ -913,8 +999,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) trInfo.tr.SetError() } + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } errDesc := fmt.Sprintf("unknown method %v", method) - if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() @@ -957,8 +1047,9 @@ func (s *Server) Stop() { s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server to accept new -// connections and RPCs and blocks until all the pending RPCs are finished. +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. func (s *Server) GracefulStop() { s.mu.Lock() defer s.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go index b24dcd8d34..5730004ab0 100644 --- a/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go @@ -34,7 +34,6 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Unary request. 
type SimpleRequest struct { Id int32 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` } @@ -44,7 +43,13 @@ func (m *SimpleRequest) String() string { return proto.CompactTextStr func (*SimpleRequest) ProtoMessage() {} func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -// Unary response, as configured by the request. +func (m *SimpleRequest) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + type SimpleResponse struct { Id int32 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` } @@ -54,6 +59,13 @@ func (m *SimpleResponse) String() string { return proto.CompactTextSt func (*SimpleResponse) ProtoMessage() {} func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *SimpleResponse) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + func init() { proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") @@ -77,6 +89,10 @@ type TestServiceClient interface { // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // Client stream + ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) + // Server stream + ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) } type testServiceClient struct { @@ -127,6 +143,72 @@ func (x *testServiceFullDuplexCallClient) Recv() (*SimpleResponse, error) { return m, nil } +func (c *testServiceClient) ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/ClientStreamCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceClientStreamCallClient{stream} + return x, nil +} + +type TestService_ClientStreamCallClient interface { + Send(*SimpleRequest) error + CloseAndRecv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testServiceClientStreamCallClient struct { + grpc.ClientStream +} + +func (x *testServiceClientStreamCallClient) Send(m *SimpleRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceClientStreamCallClient) CloseAndRecv() (*SimpleResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/ServerStreamCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceServerStreamCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_ServerStreamCallClient interface { + Recv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testServiceServerStreamCallClient struct { + grpc.ClientStream +} + +func (x *testServiceServerStreamCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // Server API for TestService service type TestServiceServer interface { @@ -137,6 +219,10 @@ type TestServiceServer interface { // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(TestService_FullDuplexCallServer) error + // Client stream + ClientStreamCall(TestService_ClientStreamCallServer) error + // Server stream + ServerStreamCall(*SimpleRequest, TestService_ServerStreamCallServer) error } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { @@ -187,6 +273,53 @@ func (x *testServiceFullDuplexCallServer) Recv() (*SimpleRequest, error) { return m, nil } +func _TestService_ClientStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).ClientStreamCall(&testServiceClientStreamCallServer{stream}) +} + +type TestService_ClientStreamCallServer interface { + SendAndClose(*SimpleResponse) error + Recv() (*SimpleRequest, error) + grpc.ServerStream +} + +type testServiceClientStreamCallServer struct { + grpc.ServerStream +} + +func (x *testServiceClientStreamCallServer) SendAndClose(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceClientStreamCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_ServerStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SimpleRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).ServerStreamCall(m, &testServiceServerStreamCallServer{stream}) +} + +type TestService_ServerStreamCallServer interface { + Send(*SimpleResponse) error + grpc.ServerStream +} + +type testServiceServerStreamCallServer struct { + grpc.ServerStream +} + +func (x *testServiceServerStreamCallServer) Send(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), @@ -203,6 +336,16 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, ClientStreams: true, }, + { + StreamName: "ClientStreamCall", + Handler: _TestService_ClientStreamCall_Handler, + ClientStreams: true, + }, + { + StreamName: "ServerStreamCall", + Handler: _TestService_ServerStreamCall_Handler, + ServerStreams: true, + }, }, Metadata: "test.proto", } @@ -210,16 +353,18 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 167 bytes of a gzipped FileDescriptorProto + // 196 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 
0x2a, 0x48, 0xd6, 0x03, 0x09, 0x64, 0xe6, 0xa5, 0x2b, 0xc9, 0x73, 0xf1, 0x06, 0x67, 0xe6, 0x16, 0xe4, 0xa4, 0x06, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0xf1, 0x71, 0x31, 0x65, 0xa6, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xb0, 0x06, 0x31, 0x65, 0xa6, 0x28, 0x29, 0x70, 0xf1, 0xc1, 0x14, 0x14, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x42, - 0x55, 0x30, 0xc3, 0x54, 0x18, 0x2d, 0x63, 0xe4, 0xe2, 0x0e, 0x49, 0x2d, 0x2e, 0x09, 0x4e, 0x2d, + 0x55, 0x30, 0xc3, 0x54, 0x18, 0x9d, 0x60, 0xe2, 0xe2, 0x0e, 0x49, 0x2d, 0x2e, 0x09, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x15, 0x72, 0xe3, 0xe2, 0x0c, 0xcd, 0x4b, 0x2c, 0xaa, 0x74, 0x4e, 0xcc, 0xc9, 0x11, 0x92, 0xd6, 0x43, 0xb6, 0x4e, 0x0f, 0xc5, 0x2e, 0x29, 0x19, 0xec, 0x92, 0x50, 0x7b, 0xfc, 0xb9, 0xf8, 0xdc, 0x4a, 0x73, 0x72, 0x5c, 0x4a, 0x0b, 0x72, 0x52, 0x2b, 0x28, 0x34, 0x4c, - 0x83, 0xd1, 0x80, 0x31, 0x89, 0x0d, 0x1c, 0x00, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8d, - 0x82, 0x5b, 0xdd, 0x0e, 0x01, 0x00, 0x00, + 0x83, 0xd1, 0x80, 0x51, 0xc8, 0x9f, 0x4b, 0xc0, 0x39, 0x27, 0x33, 0x35, 0xaf, 0x24, 0xb8, 0xa4, + 0x28, 0x35, 0x31, 0x97, 0x62, 0x23, 0x41, 0x06, 0x82, 0x3c, 0x9d, 0x5a, 0x44, 0x15, 0x03, 0x0d, + 0x18, 0x93, 0xd8, 0xc0, 0x51, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x61, 0x49, 0x59, 0xe6, + 0xb0, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index 26e1a8e2f0..5fdce2f5bb 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -45,19 +45,22 @@ type ConnTagInfo struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // TODO add QOS related fields. } // RPCTagInfo defines the relevant information needed by RPC context tagger. type RPCTagInfo struct { // FullMethodName is the RPC method in the format of /package.service/method. FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). type Handler interface { // TagRPC can attach some information to the given context. - // The returned context is used in the rest lifetime of the RPC. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. TagRPC(context.Context, *RPCTagInfo) context.Context // HandleRPC processes the RPC stats. HandleRPC(context.Context, RPCStats) diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index a82448a68b..6c406c7bad 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -49,7 +49,7 @@ type RPCStats interface { } // Begin contains stats when an RPC begins. -// FailFast are only valid if Client is true. +// FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool @@ -59,7 +59,7 @@ type Begin struct { FailFast bool } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} @@ -80,19 +80,19 @@ type InPayload struct { RecvTime time.Time } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. 
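The TagRPC/HandleRPC hooks documented in the stats hunk above are the extension point that instrumentation (for example the weaveworks/common gRPC middleware this update pulls in) builds on. A minimal sketch of a handler consuming them; the loggingHandler type and log output are the editor's, and the connection-level TagConn/HandleConn methods are assumed from the same stats package rather than shown in this hunk:

package statsexample

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/stats"
)

// loggingHandler is illustrative only; it logs RPC lifecycle events.
type loggingHandler struct{}

func (loggingHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	// FailFast is only meaningful on the client side, per the new field comment above.
	log.Printf("tag %s failfast=%v", info.FullMethodName, info.FailFast)
	return ctx
}

func (loggingHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	switch s := s.(type) {
	case *stats.Begin:
		log.Printf("begin client=%v", s.IsClient())
	case *stats.End:
		log.Printf("end err=%v", s.Error)
	}
}

func (loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (loggingHandler) HandleConn(context.Context, stats.ConnStats) {}

Such a handler would typically be installed with grpc.WithStatsHandler on the client or grpc.StatsHandler on the server; neither option appears in this hunk.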
func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} // InHeader contains stats when a header is received. -// FullMethod, addresses and Compression are only valid if Client is false. type InHeader struct { // Client is true if this InHeader is from client side. Client bool // WireLength is the wire length of header. WireLength int + // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. @@ -103,7 +103,7 @@ type InHeader struct { Compression string } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} @@ -116,7 +116,7 @@ type InTrailer struct { WireLength int } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} @@ -137,19 +137,19 @@ type OutPayload struct { SentTime time.Time } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} // OutHeader contains stats when a header is sent. -// FullMethod, addresses and Compression are only valid if Client is true. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool // WireLength is the wire length of header. WireLength int + // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. @@ -160,7 +160,7 @@ type OutHeader struct { Compression string } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} @@ -173,7 +173,7 @@ type OutTrailer struct { WireLength int } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} @@ -184,7 +184,7 @@ type End struct { Client bool // EndTime is the time when the RPC ends. EndTime time.Time - // Error is the error just happened. Its type is gRPC error. + // Error is the error just happened. It implements status.Status if non-nil. Error error } diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 0000000000..99a4cbe511 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,145 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// statusError is an alias of a status proto. It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) status() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). 
+func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package, otherwise it returns nil, false. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true + } + if s, ok := err.(*statusError); ok { + return s.status(), true + } + return nil, false +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb468dc37e..2106d3ecb2 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -37,7 +37,6 @@ import ( "bytes" "errors" "io" - "math" "sync" "time" @@ -46,6 +45,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" "google.golang.org/grpc/transport" ) @@ -113,17 +113,24 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth cancel context.CancelFunc ) c := defaultCallInfo - if mc, ok := cc.getMethodConfig(method); ok { - c.failFast = !mc.WaitForReady - if mc.Timeout > 0 { - ctx, cancel = context.WithTimeout(ctx, mc.Timeout) - } + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + if mc.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) } + + opts = append(cc.dopts.callOptions, opts...) for _, o := range opts { if err := o.before(&c); err != nil { return nil, toRPCErr(err) } } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, @@ -132,6 +139,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } + if c.creds != nil { + callHdr.Creds = c.creds + } var trInfo traceInfo if EnableTracing { trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) @@ -151,26 +161,27 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } + ctx = newContextWithRPCInfo(ctx) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - } - defer func() { - if err != nil && sh != nil { - // Only handle end stats if err != nil. - end := &stats.End{ - Client: true, - Error: err, + defer func() { + if err != nil { + // Only handle end stats if err != nil. 
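The status package added above becomes the canonical way to build and unpack RPC errors (toRPCErr and the stream code are migrated onto it elsewhere in this patch). A short usage sketch, with the service, message types and client stub being placeholders rather than anything from this change (imports assumed: google.golang.org/grpc/codes, google.golang.org/grpc/status):

// Server side: a handler returns a coded error.
func (s *routeServer) GetFeature(ctx context.Context, p *pb.Point) (*pb.Feature, error) {
	if p == nil {
		return nil, status.Errorf(codes.InvalidArgument, "point must not be nil")
	}
	return s.lookup(p), nil
}

// Client side: recover the code and message carried over the wire.
if _, err := client.GetFeature(ctx, nil); err != nil {
	if st, ok := status.FromError(err); ok {
		log.Printf("rpc failed: code=%s msg=%q", st.Code(), st.Message())
	} else {
		log.Printf("non-status error: %v", err)
	}
}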
+ end := &stats.End{ + Client: true, + Error: err, + } + sh.HandleRPC(ctx, end) } - sh.HandleRPC(ctx, end) - } - }() + }() + } gopts := BalancerGetOptions{ BlockingWait: !c.failFast, } @@ -178,7 +189,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth t, put, err = cc.getTransport(ctx, gopts) if err != nil { // TODO(zhaoq): Probably revisit the error handling. - if _, ok := err.(*rpcError); ok { + if _, ok := status.FromError(err); ok { return nil, err } if err == errConnClosing || err == errConnUnavailable { @@ -193,14 +204,17 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth s, err = t.NewStream(ctx, callHdr) if err != nil { + if _, ok := err.(transport.ConnectionError); ok && put != nil { + // If error is connection error, transport was sending data on wire, + // and we are not sure if anything has been sent on wire. + // If error is not connection error, we are sure nothing has been sent. + updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) + } if put != nil { put() put = nil } - if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return nil, toRPCErr(err) - } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue } return nil, toRPCErr(err) @@ -236,14 +250,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth select { case <-t.Error(): // Incur transport error, simply exit. + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + cs.closeTransportStream(ErrClientConnClosing) case <-s.Done(): // TODO: The trace of the RPC is terminated here when there is no pending // I/O, which is probably not the optimal solution. - if s.StatusCode() == codes.OK { - cs.finish(nil) - } else { - cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc())) - } + cs.finish(s.Status().Err()) cs.closeTransportStream(nil) case <-s.GoAway(): cs.finish(errConnDrain) @@ -273,9 +286,10 @@ type clientStream struct { tracing bool // set to EnableTracing when the clientStream is created. - mu sync.Mutex - put func() - closed bool + mu sync.Mutex + put func() + closed bool + finished bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), // and is set to nil when the clientStream's finish method is called. trInfo traceInfo @@ -350,7 +364,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } }() if err != nil { - return Errorf(codes.Internal, "grpc: %v", err) + return err + } + if cs.c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(out) > *cs.c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), *cs.c.maxSendMessageSize) } err = cs.t.Write(cs.s, out, &transport.Options{Last: false}) if err == nil && outPayload != nil { @@ -361,28 +381,16 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } func (cs *clientStream) RecvMsg(m interface{}) (err error) { - defer func() { - if err != nil && cs.statsHandler != nil { - // Only generate End if err != nil. - // If err == nil, it's not the last RecvMsg. - // The last RecvMsg gets either an RPC error or io.EOF. 
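The new send/receive limit checks lean on getMaxSize, which merges a per-method service-config limit, any limit set through call options, and a package default. The helper itself lives in rpc_util.go outside this hunk; its precedence is roughly the following reconstruction (not the vendored source):

// Method config wins, then the call-option value, then the package default.
func getMaxSize(mcMax, copMax *int, defaultVal int) *int {
	if mcMax == nil && copMax == nil {
		return &defaultVal
	}
	if mcMax != nil {
		return mcMax
	}
	return copMax
}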
- end := &stats.End{ - Client: true, - EndTime: time.Now(), - } - if err != io.EOF { - end.Error = toRPCErr(err) - } - cs.statsHandler.HandleRPC(cs.statsCtx, end) - } - }() var inPayload *stats.InPayload if cs.statsHandler != nil { inPayload = &stats.InPayload{ Client: true, } } - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, inPayload) + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -405,17 +413,20 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } // Special handling for client streaming rpc. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, nil) + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { - if cs.s.StatusCode() == codes.OK { - cs.finish(err) - return nil + if se := cs.s.Status().Err(); se != nil { + return se } - return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) + cs.finish(err) + return nil } return toRPCErr(err) } @@ -423,11 +434,11 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { cs.closeTransportStream(err) } if err == io.EOF { - if cs.s.StatusCode() == codes.OK { - // Returns io.EOF to indicate the end of the stream. - return + if statusErr := cs.s.Status().Err(); statusErr != nil { + return statusErr } - return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) + // Returns io.EOF to indicate the end of the stream. + return } return toRPCErr(err) } @@ -461,20 +472,39 @@ func (cs *clientStream) closeTransportStream(err error) { } func (cs *clientStream) finish(err error) { + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.finished { + return + } + cs.finished = true defer func() { if cs.cancel != nil { cs.cancel() } }() - cs.mu.Lock() - defer cs.mu.Unlock() for _, o := range cs.opts { o.after(&cs.c) } if cs.put != nil { + updateRPCInfoInContext(cs.s.Context(), rpcInfo{ + bytesSent: cs.s.BytesSent(), + bytesReceived: cs.s.BytesReceived(), + }) cs.put() cs.put = nil } + if cs.statsHandler != nil { + end := &stats.End{ + Client: true, + EndTime: time.Now(), + } + if err != io.EOF { + // end.Error is nil if the RPC finished successfully. + end.Error = toRPCErr(err) + } + cs.statsHandler.HandleRPC(cs.statsCtx, end) + } if !cs.tracing { return } @@ -511,17 +541,16 @@ type ServerStream interface { // serverStream implements a server side Stream. 
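With the Status plumbing above, a stream that ends cleanly surfaces io.EOF to the caller, while a non-OK status from the server comes back as the status error itself. A server-streaming read loop written against these semantics (stream, use and the generated Recv are placeholders for any client stream):

for {
	msg, err := stream.Recv()
	if err == io.EOF {
		break // server finished with an OK status
	}
	if err != nil {
		return err // carries the non-OK status received in the trailers
	}
	use(msg)
}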
type serverStream struct { - t transport.ServerTransport - s *transport.Stream - p *parser - codec Codec - cp Compressor - dc Decompressor - cbuf *bytes.Buffer - maxMsgSize int - statusCode codes.Code - statusDesc string - trInfo *traceInfo + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + cp Compressor + dc Decompressor + cbuf *bytes.Buffer + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo statsHandler stats.Handler @@ -577,9 +606,11 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() if err != nil { - err = Errorf(codes.Internal, "grpc: %v", err) return err } + if len(out) > ss.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), ss.maxSendMessageSize) + } if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } @@ -609,7 +640,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if ss.statsHandler != nil { inPayload = &stats.InPayload{} } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { if err == io.EOF { return err } diff --git a/vendor/google.golang.org/grpc/transport/pre_go16.go b/vendor/google.golang.org/grpc/test/race.go similarity index 80% rename from vendor/google.golang.org/grpc/transport/pre_go16.go rename to vendor/google.golang.org/grpc/test/race.go index 33d91c17c4..2dc7cf89a6 100644 --- a/vendor/google.golang.org/grpc/transport/pre_go16.go +++ b/vendor/google.golang.org/grpc/test/race.go @@ -1,4 +1,4 @@ -// +build !go1.6 +// +build race /* * Copyright 2016, Google Inc. @@ -32,20 +32,8 @@ * */ -package transport +package test -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - var dialer net.Dialer - if deadline, ok := ctx.Deadline(); ok { - dialer.Timeout = deadline.Sub(time.Now()) - } - return dialer.Dial(network, address) +func init() { + raceMode = true } diff --git a/vendor/google.golang.org/grpc/test/servertester.go b/vendor/google.golang.org/grpc/test/servertester.go new file mode 100644 index 0000000000..ed3724ddae --- /dev/null +++ b/vendor/google.golang.org/grpc/test/servertester.go @@ -0,0 +1,295 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package test + +import ( + "bytes" + "errors" + "io" + "strings" + "testing" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// This is a subset of http2's serverTester type. +// +// serverTester wraps a io.ReadWriter (acting like the underlying +// network connection) and provides utility methods to read and write +// http2 frames. +// +// NOTE(bradfitz): this could eventually be exported somewhere. Others +// have asked for it too. For now I'm still experimenting with the +// API and don't feel like maintaining a stable testing API. + +type serverTester struct { + cc io.ReadWriteCloser // client conn + t testing.TB + fr *http2.Framer + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder + + // reading frames: + frc chan http2.Frame + frErrc chan error + readTimer *time.Timer +} + +func newServerTesterFromConn(t testing.TB, cc io.ReadWriteCloser) *serverTester { + st := &serverTester{ + t: t, + cc: cc, + frc: make(chan http2.Frame, 1), + frErrc: make(chan error, 1), + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + st.fr = http2.NewFramer(cc, cc) + st.fr.ReadMetaHeaders = hpack.NewDecoder(4096 /*initialHeaderTableSize*/, nil) + + return st +} + +func (st *serverTester) readFrame() (http2.Frame, error) { + go func() { + fr, err := st.fr.ReadFrame() + if err != nil { + st.frErrc <- err + } else { + st.frc <- fr + } + }() + t := time.NewTimer(2 * time.Second) + defer t.Stop() + select { + case f := <-st.frc: + return f, nil + case err := <-st.frErrc: + return nil, err + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +// greet initiates the client's HTTP/2 connection into a state where +// frames may be sent. +func (st *serverTester) greet() { + st.writePreface() + st.writeInitialSettings() + st.wantSettings() + st.writeSettingsAck() + for { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *http2.WindowUpdateFrame: + // grpc's transport/http2_server sends this + // before the settings ack. The Go http2 + // server uses a setting instead. 
+ case *http2.SettingsFrame: + if f.IsAck() { + return + } + st.t.Fatalf("during greet, got non-ACK settings frame") + default: + st.t.Fatalf("during greet, unexpected frame type %T", f) + } + } +} + +func (st *serverTester) writePreface() { + n, err := st.cc.Write([]byte(http2.ClientPreface)) + if err != nil { + st.t.Fatalf("Error writing client preface: %v", err) + } + if n != len(http2.ClientPreface) { + st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(http2.ClientPreface)) + } +} + +func (st *serverTester) writeInitialSettings() { + if err := st.fr.WriteSettings(); err != nil { + st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (st *serverTester) writeSettingsAck() { + if err := st.fr.WriteSettingsAck(); err != nil { + st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) + } +} + +func (st *serverTester) wantSettings() *http2.SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*http2.SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*http2.SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.IsAck() { + st.t.Fatal("Settings Frame didn't have ACK set") + } +} + +// wait for any activity from the server +func (st *serverTester) wantAnyFrame() http2.Frame { + f, err := st.fr.ReadFrame() + if err != nil { + st.t.Fatal(err) + } + return f +} + +func (st *serverTester) encodeHeaderField(k, v string) { + err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } +} + +// encodeHeader encodes headers and returns their HPACK bytes. headers +// must contain an even number of key/value pairs. There may be +// multiple pairs for keys (e.g. "cookie"). The :method, :path, and +// :scheme headers default to GET, / and https. +func (st *serverTester) encodeHeader(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + + st.headerBuf.Reset() + + if len(headers) == 0 { + // Fast path, mostly for benchmarks, so test code doesn't pollute + // profiles when we're looking to improve server allocations. + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + return st.headerBuf.Bytes() + } + + if len(headers) == 2 && headers[0] == ":method" { + // Another fast path for benchmarks. + st.encodeHeaderField(":method", headers[1]) + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + return st.headerBuf.Bytes() + } + + pseudoCount := map[string]int{} + keys := []string{":method", ":path", ":scheme"} + vals := map[string][]string{ + ":method": {"GET"}, + ":path": {"/"}, + ":scheme": {"https"}, + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if _, ok := vals[k]; !ok { + keys = append(keys, k) + } + if strings.HasPrefix(k, ":") { + pseudoCount[k]++ + if pseudoCount[k] == 1 { + vals[k] = []string{v} + } else { + // Allows testing of invalid headers w/ dup pseudo fields. 
+ vals[k] = append(vals[k], v) + } + } else { + vals[k] = append(vals[k], v) + } + } + for _, k := range keys { + for _, v := range vals[k] { + st.encodeHeaderField(k, v) + } + } + return st.headerBuf.Bytes() +} + +func (st *serverTester) writeHeadersGRPC(streamID uint32, path string) { + st.writeHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: st.encodeHeader( + ":method", "POST", + ":path", path, + "content-type", "application/grpc", + "te", "trailers", + ), + EndStream: false, + EndHeaders: true, + }) +} + +func (st *serverTester) writeHeaders(p http2.HeadersFrameParam) { + if err := st.fr.WriteHeaders(p); err != nil { + st.t.Fatalf("Error writing HEADERS: %v", err) + } +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) writeRSTStream(streamID uint32, code http2.ErrCode) { + if err := st.fr.WriteRSTStream(streamID, code); err != nil { + st.t.Fatalf("Error writing RST_STREAM: %v", err) + } +} + +func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, padding []byte) { + if err := st.fr.WriteDataPadded(streamID, endStream, data, padding); err != nil { + st.t.Fatalf("Error writing DATA with padding: %v", err) + } +} diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index 2586cba469..beff034c83 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -35,7 +35,9 @@ package transport import ( "fmt" + "math" "sync" + "time" "golang.org/x/net/http2" ) @@ -44,8 +46,20 @@ const ( // The default value of flow control window size in HTTP2 spec. defaultWindowSize = 65535 // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - initialConnWindowSize = defaultWindowSize * 16 // for a connection + initialWindowSize = defaultWindowSize // for an RPC + initialConnWindowSize = defaultWindowSize * 16 // for a connection + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = time.Duration(20 * time.Second) + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = time.Duration(2 * time.Hour) + defaultServerKeepaliveTimeout = time.Duration(20 * time.Second) + defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute) + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 ) // The following defines various control items which could flow through @@ -54,6 +68,7 @@ const ( type windowUpdate struct { streamID uint32 increment uint32 + flush bool } func (*windowUpdate) item() {} @@ -73,6 +88,8 @@ type resetStream struct { func (*resetStream) item() {} type goAway struct { + code http2.ErrCode + debugData []byte } func (*goAway) item() {} @@ -153,6 +170,40 @@ type inFlow struct { // The amount of data the application has consumed but grpc has not sent // window update for them. Used to reduce window update frequency. pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. 
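Stepping back to the serverTester added above: grpc's own tests hand it one end of a connection whose other end is served by the real server transport, then drive raw HTTP/2 frames at it. A hypothetical harness in the same package; the net.Pipe wiring and serveGRPCOn are the editor's assumptions, not part of this file:

func TestSpeaksGRPCOverHTTP2(t *testing.T) {
	clientConn, serverConn := net.Pipe()
	defer clientConn.Close()
	go serveGRPCOn(serverConn) // hypothetical: stands up the server-side transport

	st := newServerTesterFromConn(t, clientConn)
	st.greet() // client preface + SETTINGS exchange
	st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall")
	st.writeData(1, true, []byte{0, 0, 0, 0, 0}) // 5-byte gRPC frame: flag 0, length 0
	if _, err := st.readFrame(); err != nil {
		t.Fatalf("expected a response frame from the server: %v", err)
	}
}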
+ delta uint32 +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 } // onData is invoked when some data frame is received. It updates pendingData. @@ -160,7 +211,7 @@ func (f *inFlow) onData(n uint32) error { f.mu.Lock() defer f.mu.Unlock() f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit { + if f.pendingData+f.pendingUpdate > f.limit+f.delta { return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) } return nil @@ -175,6 +226,13 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { wu := f.pendingUpdate @@ -184,10 +242,10 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } -func (f *inFlow) resetPendingData() uint32 { +func (f *inFlow) resetPendingUpdate() uint32 { f.mu.Lock() defer f.mu.Unlock() - n := f.pendingData - f.pendingData = 0 + n := f.pendingUpdate + f.pendingUpdate = 0 return n } diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go index ee1c46bad5..252509e15c 100644 --- a/vendor/google.golang.org/grpc/transport/go16.go +++ b/vendor/google.golang.org/grpc/transport/go16.go @@ -37,6 +37,8 @@ package transport import ( "net" + "google.golang.org/grpc/codes" + "golang.org/x/net/context" ) @@ -44,3 +46,14 @@ import ( func dialContext(ctx context.Context, network, address string) (net.Conn, error) { return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) } + +// ContextErr converts the error from context package into a StreamError. 
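A worked example of the new delta accounting, with the editor's numbers: a 64KiB stream window, 16KiB already received but unread, and an application read request of 128KiB. Since inFlow is unexported, the snippet only makes sense inside the transport package:

f := &inFlow{limit: 64 * 1024}
_ = f.onData(16 * 1024)        // pendingData = 16KiB, within limit+delta
w := f.maybeAdjust(128 * 1024) // estSenderQuota = 48KiB, estUntransmittedData = 112KiB
// w == 131072: because limit+n is still below maxWindowSize, the whole read is
// granted as delta; a later onRead burns the delta down before anything is
// counted toward pendingUpdate.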
+func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go index 356f13ff19..29f8b14d95 100644 --- a/vendor/google.golang.org/grpc/transport/go17.go +++ b/vendor/google.golang.org/grpc/transport/go17.go @@ -35,12 +35,26 @@ package transport import ( + "context" "net" - "golang.org/x/net/context" + "google.golang.org/grpc/codes" + + netctx "golang.org/x/net/context" ) // dialContext connects to the address on the named network. func dialContext(ctx context.Context, network, address string) (net.Conn, error) { return (&net.Dialer{}).DialContext(ctx, network, address) } + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, netctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 10b6dc0b19..f0bae9474c 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -53,6 +53,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" ) // NewServerHandlerTransport returns a ServerTransport handling gRPC @@ -101,14 +102,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr continue } for _, v := range vv { - if k == "user-agent" { - // user-agent is special. Copying logic of http_util.go. - if i := strings.LastIndex(v, " "); i == -1 { - // There is no application user agent string being set - continue - } else { - v = v[:i] - } + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) } metakv = append(metakv, k, v) } @@ -174,15 +170,22 @@ func (a strAddr) String() string { return string(a) } // do runs fn in the ServeHTTP goroutine. func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. 
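ContextErr gives the sentinel errors from both context packages a gRPC code before they escape the transport. Its call sites look roughly like this blocked-wait fragment (msgCh and handle are placeholders):

select {
case <-ctx.Done():
	// codes.DeadlineExceeded or codes.Canceled, wrapped as a StreamError.
	return ContextErr(ctx.Err())
case m := <-msgCh:
	return handle(m)
}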
select { - case ht.writes <- fn: - return nil case <-ht.closedCh: return ErrConnClosing + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } + } } -func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { err := ht.do(func() { ht.writeCommonHeaders(s) @@ -192,10 +195,13 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, ht.rw.(http.Flusher).Flush() h := ht.rw.Header() - h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) - if statusDesc != "" { - h.Set("Grpc-Message", encodeGrpcMessage(statusDesc)) + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) } + + // TODO: Support Grpc-Status-Details-Bin + if md := s.Trailer(); len(md) > 0 { for k, vv := range md { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. @@ -203,10 +209,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, continue } for _, v := range vv { - // http2 ResponseWriter mechanism to - // send undeclared Trailers after the - // headers have possibly been written. - h.Add(http2.TrailerPrefix+k, v) + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) } } } @@ -234,6 +239,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers h.Add("Trailer", "Grpc-Status") h.Add("Trailer", "Grpc-Message") + // TODO: Support Grpc-Status-Details-Bin if s.sendCompress != "" { h.Set("Grpc-Encoding", s.sendCompress) @@ -260,6 +266,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { continue } for _, v := range vv { + v = encodeMetadataHeader(k, v) h.Add(k, v) } } @@ -300,13 +307,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace req := ht.req s := &Stream{ - id: 0, // irrelevant - windowHandler: func(int) {}, // nothing - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), } pr := &peer.Peer{ Addr: ht.RemoteAddr(), @@ -314,10 +321,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace if req.TLS != nil { pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} } - ctx = metadata.NewContext(ctx, ht.headerMD) + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) ctx = peer.NewContext(ctx, pr) s.ctx = newContextWithStream(ctx, s) - s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, + windowHandler: func(int) {}, + } // readerDone is closed when the Body.Read-ing goroutine exits. 
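The encodeMetadataHeader/decodeMetadataHeader calls threaded through the handler transport exist so that binary metadata (keys with the "-bin" suffix) survives the trip through HTTP/2 headers, base64-coded on the wire. From application code that looks roughly like the fragment below; pb and client are placeholders, while metadata.NewOutgoingContext is the API this grpc vintage already uses elsewhere in the patch:

md := metadata.Pairs(
	"trace-bin", string([]byte{0x01, 0x02, 0x03}), // "-bin" keys carry raw bytes
	"user", "alice",
)
ctx := metadata.NewOutgoingContext(context.Background(), md)
if _, err := client.UnaryCall(ctx, &pb.SimpleRequest{}); err != nil {
	log.Printf("call failed: %v", err)
}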
readerDone := make(chan struct{}) diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 892f8ba675..92b5180e7f 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -35,12 +35,12 @@ package transport import ( "bytes" - "fmt" "io" "math" "net" "strings" "sync" + "sync/atomic" "time" "golang.org/x/net/context" @@ -49,9 +49,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" ) // http2Client implements the ClientTransport interface with HTTP2. @@ -80,6 +82,8 @@ type http2Client struct { // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} + // awakenKeepalive is used to wake up keepalive when after it has gone dormant. + awakenKeepalive chan struct{} framer *framer hBuf *bytes.Buffer // the buffer for HPACK encoding @@ -97,10 +101,19 @@ type http2Client struct { // The scheme used: https if TLS is on, http otherwise. scheme string + isSecure bool + creds []credentials.PerRPCCredentials + // Boolean to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + kp keepalive.ClientParameters + statsHandler stats.Handler + initialWindowSize int32 + mu sync.Mutex // guard the following variables state transportState // the state of underlying connection activeStreams map[uint32]*Stream @@ -112,6 +125,9 @@ type http2Client struct { goAwayID uint32 // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { @@ -157,9 +173,9 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( conn, err := dial(ctx, opts.Dialer, addr.Addr) if err != nil { if opts.FailOnNonTempDialError { - return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err) + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } // Any further errors will close the underlying connection defer func(conn net.Conn) { @@ -167,7 +183,10 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( conn.Close() } }(conn) - var authInfo credentials.AuthInfo + var ( + isSecure bool + authInfo credentials.AuthInfo + ) if creds := opts.TransportCredentials; creds != nil { scheme = "https" conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn) @@ -175,43 +194,63 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( // Credentials handshake errors are typically considered permanent // to avoid retrying on e.g. bad certificates. 
temp := isTemporary(err) - return nil, connectionErrorf(temp, err, "transport: %v", err) + return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err) } + isSecure = true + } + kp := opts.KeepaliveParams + // Validate keepalive parameters. + if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout } - ua := primaryUA - if opts.UserAgent != "" { - ua = opts.UserAgent + " " + ua + icwz := int32(initialConnWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize } var buf bytes.Buffer t := &http2Client{ ctx: ctx, target: addr.Addr, - userAgent: ua, + userAgent: opts.UserAgent, md: addr.Metadata, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), authInfo: authInfo, // The client initiated stream id is odd starting from 1. - nextID: 1, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - errorChan: make(chan struct{}), - goAway: make(chan struct{}), - framer: newFramer(conn), - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - creds: opts.PerRPCCredentials, - maxStreams: math.MaxInt32, - streamSendQuota: defaultWindowSize, - statsHandler: opts.StatsHandler, - } + nextID: 1, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + errorChan: make(chan struct{}), + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + framer: newFramer(conn), + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + creds: opts.PerRPCCredentials, + maxStreams: defaultMaxStreamsClient, + streamsQuota: newQuotaPool(defaultMaxStreamsClient), + streamSendQuota: defaultWindowSize, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + } + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + } + // Make sure awakenKeepalive can't be written upon. + // keepalive routine will make it writable, if need be. 
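The kp fields validated above are fed from keepalive.ClientParameters; with the defaults introduced in control.go the client never pings (Time defaults to infinity) and would give the peer 20 seconds to answer if it did. A plausible way to turn pings on, assuming the WithKeepaliveParams dial option from the same grpc-go era (it is not part of this hunk; addr is a placeholder):

conn, err := grpc.Dial(addr,
	grpc.WithInsecure(),
	grpc.WithKeepaliveParams(keepalive.ClientParameters{
		Time:    30 * time.Second, // ping after 30s without activity, instead of the infinity default
		Timeout: 20 * time.Second, // matches defaultClientKeepaliveTimeout above
	}),
)
if err != nil {
	log.Fatalf("dial: %v", err)
}
defer conn.Close()

The awakenKeepalive channel initialised here is how the keepalive goroutine is parked while no streams are active and woken again when the first stream is created, as the comments further down describe.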
+ t.awakenKeepalive <- struct{}{} if t.statsHandler != nil { t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, @@ -230,32 +269,35 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( n, err := t.conn.Write(clientPreface) if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) } if n != len(clientPreface) { t.Close() return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } - if initialWindowSize != defaultWindowSize { + if t.initialWindowSize != defaultWindowSize { err = t.framer.writeSettings(true, http2.Setting{ ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize), + Val: uint32(t.initialWindowSize), }) } else { err = t.framer.writeSettings(true) } if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) } // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) } } go t.controller() + if t.kp.Time != infinity { + go t.keepalive() + } t.writableChan <- 0 return t, nil } @@ -269,27 +311,33 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { method: callHdr.Method, sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), - fc: &inFlow{limit: initialWindowSize}, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), headerChan: make(chan struct{}), } t.nextID += 2 - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) } // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. s.ctx = ctx - s.dec = &recvBufferReader{ - ctx: s.ctx, - goAway: s.goAway, - recv: s.buf, + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, } + return s } -// NewStream creates a stream and register it into the transport as "active" +// NewStream creates a stream and registers it into the transport as "active" // streams. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { pr := &peer.Peer{ @@ -299,10 +347,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if t.authInfo != nil { pr.AuthInfo = t.authInfo } - userCtx := ctx ctx = peer.NewContext(ctx, pr) - authData := make(map[string]string) - for _, c := range t.creds { + var ( + authData = make(map[string]string) + audience string + ) + // Create an audience string only if needed. + if len(t.creds) > 0 || callHdr.Creds != nil { // Construct URI required to get auth request metadata. 
var port string if pos := strings.LastIndex(t.target, ":"); pos != -1 { @@ -313,17 +364,39 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } pos := strings.LastIndex(callHdr.Method, "/") if pos == -1 { - return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + pos = len(callHdr.Method) } - audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + audience = "https://" + callHdr.Host + port + callHdr.Method[:pos] + } + for _, c := range t.creds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err) + return nil, streamErrorf(codes.Internal, "transport: %v", err) } for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) authData[k] = v } } + callAuthData := make(map[string]string) + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure conneciton") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } t.mu.Lock() if t.activeStreams == nil { t.mu.Unlock() @@ -337,21 +410,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.mu.Unlock() return nil, ErrConnClosing } - checkStreamsQuota := t.streamsQuota != nil t.mu.Unlock() - if checkStreamsQuota { - sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) - if err != nil { - return nil, err - } - // Returns the quota balance back. - if sq > 1 { - t.streamsQuota.add(sq - 1) - } + sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) } if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { // Return the quota back now because there is no stream returned to the caller. - if _, ok := err.(StreamError); ok && checkStreamsQuota { + if _, ok := err.(StreamError); ok { t.streamsQuota.add(1) } return nil, err @@ -359,9 +429,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.mu.Lock() if t.state == draining { t.mu.Unlock() - if checkStreamsQuota { - t.streamsQuota.add(1) - } + t.streamsQuota.add(1) // Need to make t writable again so that the rpc in flight can still proceed. t.writableChan <- 0 return nil, ErrStreamDrain @@ -371,19 +439,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ErrConnClosing } s := t.newStream(ctx, callHdr) - s.clientStatsCtx = userCtx t.activeStreams[s.id] = s - - // This stream is not counted when applySetings(...) initialize t.streamsQuota. - // Reset t.streamsQuota to the right value. - var reset bool - if !checkStreamsQuota && t.streamsQuota != nil { - reset = true + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. 
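The callAuthData block above lets credentials.PerRPCCredentials ride on a single call, on top of any dial-level credentials, with the audience derived from the method URI minus the method name (roughly https://<host><port>/grpc.testing.TestService for a call to /grpc.testing.TestService/UnaryCall). From the caller's side, assuming the PerRPCCredentials call option and the oauth helper from the same grpc-go tree (neither is shown in this hunk; token, ctx, req and client are placeholders):

perRPC := oauth.NewOauthAccess(&oauth2.Token{AccessToken: token})
// Per-call credentials; because this credential type requires transport
// security, the check added above rejects it on an insecure connection
// with codes.Unauthenticated.
if _, err := client.UnaryCall(ctx, req, grpc.PerRPCCredentials(perRPC)); err != nil {
	log.Printf("call failed: %v", err)
}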
+ if len(t.activeStreams) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + t.framer.writePing(false, false, [8]byte{}) + default: + } } + t.mu.Unlock() - if reset { - t.streamsQuota.add(-1) - } // HPACK encodes various headers. Note that once WriteField(...) is // called, the corresponding headers/continuation frame has to be sent @@ -407,33 +474,34 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } for k, v := range authData { - // Capital header names are illegal in HTTP/2. - k = strings.ToLower(k) - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } var ( hasMD bool endHeaders bool ) - if md, ok := metadata.FromContext(ctx); ok { + if md, ok := metadata.FromOutgoingContext(ctx); ok { hasMD = true - for k, v := range md { + for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + for _, v := range vv { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } if md, ok := t.md.(*metadata.MD); ok { - for k, v := range *md { + for k, vv := range *md { if isReservedHeader(k) { continue } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + for _, v := range vv { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } @@ -473,6 +541,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, connectionErrorf(true, err, "transport: %v", err) } } + s.bytesSent = true + if t.statsHandler != nil { outHeader := &stats.OutHeader{ Client: true, @@ -482,7 +552,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea LocalAddr: t.localAddr, Compression: callHdr.SendCompress, } - t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader) + t.statsHandler.HandleRPC(s.ctx, outHeader) } t.writableChan <- 0 return s, nil @@ -491,15 +561,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // CloseStream clears the footprint of a stream when the stream is not needed any more. // This must not be executed in reader's goroutine. func (t *http2Client) CloseStream(s *Stream, err error) { - var updateStreams bool t.mu.Lock() if t.activeStreams == nil { t.mu.Unlock() return } - if t.streamsQuota != nil { - updateStreams = true - } delete(t.activeStreams, s.id) if t.state == draining && len(t.activeStreams) == 0 { // The transport is draining and s is the last live stream on t. @@ -508,15 +574,27 @@ func (t *http2Client) CloseStream(s *Stream, err error) { return } t.mu.Unlock() - if updateStreams { - t.streamsQuota.add(1) - } - s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if n := t.fc.onRead(q); n > 0 { - t.controlBuf.put(&windowUpdate{0, n}) + // rstStream is true in case the stream is being closed at the client-side + // and the server needs to be intimated about it by sending a RST_STREAM + // frame. + // To make sure this frame is written to the wire before the headers of the + // next stream waiting for streamsQuota, we add to streamsQuota pool only + // after having acquired the writableChan to send RST_STREAM out (look at + // the controller() routine). 
+ var rstStream bool + var rstError http2.ErrCode + defer func() { + // In case, the client doesn't have to send RST_STREAM to server + // we can safely add back to streamsQuota pool now. + if !rstStream { + t.streamsQuota.add(1) + return } - } + t.controlBuf.put(&resetStream{s.id, rstError}) + }() + s.mu.Lock() + rstStream = s.rstStream + rstError = s.rstError if s.state == streamDone { s.mu.Unlock() return @@ -527,8 +605,9 @@ func (t *http2Client) CloseStream(s *Stream, err error) { } s.state = streamDone s.mu.Unlock() - if se, ok := err.(StreamError); ok && se.Code != codes.DeadlineExceeded { - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) + if _, ok := err.(StreamError); ok { + rstStream = true + rstError = http2.ErrCodeCancel } } @@ -724,6 +803,24 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { return s, ok } +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + // Piggyback conneciton's window update along. + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw, false}) + } + t.controlBuf.put(&windowUpdate{s.id, w, true}) + } +} + // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -733,55 +830,64 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{s.id, w}) + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw, false}) + } + t.controlBuf.put(&windowUpdate{s.id, w, true}) } } func (t *http2Client) handleData(f *http2.DataFrame) { - size := len(f.Data()) + size := f.Header().Length if err := t.fc.onData(uint32(size)); err != nil { t.notifyError(connectionErrorf(true, err, "%v", err)) return } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w, true}) + } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if size > 0 { s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. 
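The handleData changes above credit the connection-level flow-control window as soon as a DATA frame arrives, regardless of whether the application has read the bytes, so one slow stream no longer starves the connection window for other streams. A rough stdlib-only sketch of that idea; the accumulator type and the quarter-window threshold are illustrative, not the transport's real inFlow:

package main

import "fmt"

// connWindow accumulates received bytes and reports when a WINDOW_UPDATE
// should be sent, once the unacknowledged total crosses a threshold.
type connWindow struct {
	limit   uint32
	pending uint32
}

// onData is called for every DATA frame on the connection, before the
// application reads anything. It returns the increment to send, or 0.
func (w *connWindow) onData(n uint32) uint32 {
	w.pending += n
	if w.pending >= w.limit/4 {
		inc := w.pending
		w.pending = 0
		return inc
	}
	return 0
}

func main() {
	w := &connWindow{limit: 64 * 1024}
	for _, frame := range []uint32{8192, 4096, 8192} {
		if inc := w.onData(frame); inc > 0 {
			fmt.Printf("send WINDOW_UPDATE(stream 0, increment %d)\n", inc)
		}
	}
}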
- if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if err := s.fc.onData(uint32(size)); err != nil { - s.state = streamDone - s.statusCode = codes.Internal - s.statusDesc = err.Error() - close(s.done) + s.rstStream = true + s.rstError = http2.ErrCodeFlowControl + s.finish(status.New(codes.Internal, err.Error())) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w, true}) + } + } s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. @@ -791,10 +897,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { s.mu.Unlock() return } - s.state = streamDone - s.statusCode = codes.Internal - s.statusDesc = "server closed the stream without sending trailers" - close(s.done) + s.finish(status.New(codes.Internal, "server closed the stream without sending trailers")) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -810,18 +913,16 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { s.mu.Unlock() return } - s.state = streamDone if !s.headerDone { close(s.headerChan) s.headerDone = true } - s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] + statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) - s.statusCode = codes.Unknown + statusCode = codes.Unknown } - s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode) - close(s.done) + s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode)) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -849,6 +950,9 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + grpclog.Printf("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } t.mu.Lock() if t.state == reachable || t.state == draining { if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { @@ -870,6 +974,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return default: + t.setGoAwayReason(f) } t.goAwayID = f.LastStreamID close(t.goAway) @@ -877,6 +982,26 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() } +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutext to be held by +// the caller. 
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = NoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = TooManyPings + } + } +} + +func (t *http2Client) GetGoAwayReason() GoAwayReason { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason +} + func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { id := f.Header().StreamID incr := f.Increment @@ -895,18 +1020,16 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !ok { return } + s.bytesReceived = true var state decodeState - for _, hf := range frame.Fields { - state.processHeaderField(hf) - } - if state.err != nil { + if err := state.decodeResponseHeader(frame); err != nil { s.mu.Lock() if !s.headerDone { close(s.headerChan) s.headerDone = true } s.mu.Unlock() - s.write(recvMsg{err: state.err}) + s.write(recvMsg{err: err}) // Something wrong. Stops reading even when there is remaining. return } @@ -920,13 +1043,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader) + t.statsHandler.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer) + t.statsHandler.HandleRPC(s.ctx, inTrailer) } } }() @@ -951,10 +1074,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(state.mdata) > 0 { s.trailer = state.mdata } - s.statusCode = state.statusCode - s.statusDesc = state.statusDesc - close(s.done) - s.state = streamDone + s.finish(state.status()) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -982,6 +1102,7 @@ func (t *http2Client) reader() { t.notifyError(err) return } + atomic.CompareAndSwapUint32(&t.activity, 0, 1) sf, ok := frame.(*http2.SettingsFrame) if !ok { t.notifyError(err) @@ -992,6 +1113,7 @@ func (t *http2Client) reader() { // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.readFrame() + atomic.CompareAndSwapUint32(&t.activity, 0, 1) if err != nil { // Abort an active stream if the http2.Framer returns a // http2.StreamError. This can happen only if the server's response @@ -1043,21 +1165,15 @@ func (t *http2Client) applySettings(ss []http2.Setting) { s.Val = math.MaxInt32 } t.mu.Lock() - reset := t.streamsQuota != nil - if !reset { - t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) - } ms := t.maxStreams t.maxStreams = int(s.Val) t.mu.Unlock() - if reset { - t.streamsQuota.add(int(s.Val) - ms) - } + t.streamsQuota.add(int(s.Val) - ms) case http2.SettingInitialWindowSize: t.mu.Lock() for _, stream := range t.activeStreams { // Adjust the sending quota for each stream. - stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) + stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota)) } t.streamSendQuota = s.Val t.mu.Unlock() @@ -1076,7 +1192,7 @@ func (t *http2Client) controller() { case <-t.writableChan: switch i := i.(type) { case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) + t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment) case *settings: if i.ack { t.framer.writeSettingsAck(true) @@ -1085,6 +1201,12 @@ func (t *http2Client) controller() { t.framer.writeSettings(true, i.ss...) 
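setGoAwayReason above records why a GOAWAY arrived so that callers of GetGoAwayReason can tell a keepalive-policy rejection apart from an ordinary drain. A small sketch of the same mapping, reusing golang.org/x/net/http2 for the error code; the local enum names are illustrative:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// goAwayReason mirrors the transport's GoAwayReason values.
type goAwayReason uint8

const (
	noReason     goAwayReason = 1
	tooManyPings goAwayReason = 2
)

// classifyGoAway maps a GOAWAY error code plus its debug data to a reason:
// ENHANCE_YOUR_CALM with "too_many_pings" means the server rejected our
// keepalive pings; anything else is an ordinary GOAWAY.
func classifyGoAway(code http2.ErrCode, debugData []byte) goAwayReason {
	if code == http2.ErrCodeEnhanceYourCalm && string(debugData) == "too_many_pings" {
		return tooManyPings
	}
	return noReason
}

func main() {
	fmt.Println(classifyGoAway(http2.ErrCodeEnhanceYourCalm, []byte("too_many_pings")))
	fmt.Println(classifyGoAway(http2.ErrCodeNo, nil))
}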
} case *resetStream: + // If the server needs to be to intimated about stream closing, + // then we need to make sure the RST_STREAM frame is written to + // the wire before the headers of the next stream waiting on + // streamQuota. We ensure this by adding to the streamsQuota pool + // only after having acquired the writableChan to send RST_STREAM. + t.streamsQuota.add(1) t.framer.writeRSTStream(true, i.streamID, i.code) case *flushIO: t.framer.flushWrite() @@ -1104,6 +1226,61 @@ func (t *http2Client) controller() { } } +// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + // Check if keepalive should go dormant. + t.mu.Lock() + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // Make awakenKeepalive writable. + <-t.awakenKeepalive + t.mu.Unlock() + select { + case <-t.awakenKeepalive: + // If the control gets here a ping has been sent + // need to reset the timer with keepalive.Timeout. + case <-t.shutdownChan: + return + } + } else { + t.mu.Unlock() + // Send ping. + t.controlBuf.put(p) + } + + // By the time control gets here a ping has been sent one way or the other. + timer.Reset(t.kp.Timeout) + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + t.Close() + return + case <-t.shutdownChan: + if !timer.Stop() { + <-timer.C + } + return + } + case <-t.shutdownChan: + if !timer.Stop() { + <-timer.C + } + return + } + } +} + func (t *http2Client) Error() <-chan struct{} { return t.errorChan } diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index a095dd0e07..3a18f30798 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -38,19 +38,25 @@ import ( "errors" "io" "math" + "math/rand" "net" "strconv" "sync" + "sync/atomic" + "time" + "github.com/golang/protobuf/proto" "golang.org/x/net/context" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) @@ -90,11 +96,35 @@ type http2Server struct { stats stats.Handler + // Flag to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + + initialWindowSize int32 + mu sync.Mutex // guard the following state transportState activeStreams map[uint32]*Stream // the per-stream outbound flow control window size set by the peer. 
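The client keepalive goroutine above, and the server-side kp/kep fields it pairs with, are driven by options from the keepalive package. A hedged sketch of how an application would supply them, assuming the grpc.WithKeepaliveParams dial option and the grpc.KeepaliveParams / grpc.KeepaliveEnforcementPolicy server options that accompany this vendored grpc revision; the addresses and durations are placeholders:

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Client: ping an idle connection every 30s and give up if no ack
	// arrives within 10s.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: false,
		}))
	if err == nil {
		defer conn.Close()
	}

	// Server: drain idle connections, cap connection age, and refuse
	// clients that ping more often than every 10s (the policy whose
	// violation triggers the "too_many_pings" GOAWAY seen elsewhere
	// in this patch).
	_ = grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 5 * time.Minute,
			MaxConnectionAge:  30 * time.Minute,
			Time:              2 * time.Hour,
			Timeout:           20 * time.Second,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             10 * time.Second,
			PermitWithoutStream: false,
		}),
	)
}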
streamSendQuota uint32 + // idle is the time instant when the connection went idle. + // This is either the begining of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time } // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is @@ -114,41 +144,75 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err Val: maxStreams, }) } - if initialWindowSize != defaultWindowSize { + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + } + icwz := int32(initialConnWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + } + if iwz != defaultWindowSize { settings = append(settings, http2.Setting{ ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize)}) + Val: uint32(iwz)}) } if err := framer.writeSettings(true, settings...); err != nil { return nil, connectionErrorf(true, err, "transport: %v", err) } // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := framer.writeWindowUpdate(true, 0, delta); err != nil { return nil, connectionErrorf(true, err, "transport: %v", err) } } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } var buf bytes.Buffer t := &http2Server{ - ctx: context.Background(), - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - maxStreams: maxStreams, - inTapHandle: config.InTapHandle, - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - state: reachable, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, - stats: config.StatsHandler, + ctx: context.Background(), + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, } if t.stats != nil { t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ @@ -159,6 +223,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.stats.HandleConn(t.ctx, connBegin) } go t.controller() + go 
t.keepalive() t.writableChan <- 0 return t, nil } @@ -170,18 +235,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( id: frame.Header().StreamID, st: t, buf: buf, - fc: &inFlow{limit: initialWindowSize}, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, } var state decodeState for _, hf := range frame.Fields { - state.processHeaderField(hf) - } - if err := state.err; err != nil { - if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) + if err := state.processHeaderField(hf); err != nil { + if se, ok := err.(StreamError); ok { + t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) + } + return } - return } if frame.StreamEnded() { @@ -208,12 +272,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.ctx = newContextWithStream(s.ctx, s) // Attach the received metadata to the context. if len(state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, state.mdata) + s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) } - - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, } s.recvCompress = state.encoding s.method = state.method @@ -224,7 +292,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - // TODO: Log the real error. + grpclog.Printf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) return } @@ -248,9 +316,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.maxStreamID = s.id s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) t.activeStreams[s.id] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } t.mu.Unlock() - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) if t.stats != nil { @@ -275,7 +346,10 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + // Only log if it isn't a simple tcp accept check (ie: tcp balancer doing open/close socket) + if err != io.EOF { + grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } t.Close() return } @@ -291,10 +365,11 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. return } if err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + grpclog.Printf("transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) t.Close() return } + atomic.StoreUint32(&t.activity, 1) sf, ok := frame.(*http2.SettingsFrame) if !ok { grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) @@ -305,6 +380,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
for { frame, err := t.framer.readFrame() + atomic.StoreUint32(&t.activity, 1) if err != nil { if se, ok := err.(http2.StreamError); ok { t.mu.Lock() @@ -363,6 +439,23 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { return s, true } +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw, false}) + } + t.controlBuf.put(&windowUpdate{s.id, w, true}) + } +} + // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -372,37 +465,41 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{s.id, w}) + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw, false}) + } + t.controlBuf.put(&windowUpdate{s.id, w, true}) } } func (t *http2Server) handleData(f *http2.DataFrame) { - size := len(f.Data()) + size := f.Header().Length if err := t.fc.onData(uint32(size)); err != nil { grpclog.Printf("transport: http2Server %v", err) t.Close() return } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w, true}) + } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if size > 0 { s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -411,13 +508,20 @@ func (t *http2Server) handleData(f *http2.DataFrame) { t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w, true}) + } + } s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } } if f.Header().Flags.Has(http2.FlagDataEndStream) { // Received the end of stream from the client. 
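In the handleData hunks (client earlier, server above) the flow-control charge is now the full frame length from the header, while only len(f.Data()) is delivered to the stream; when the frame is padded, the difference is credited straight back to the stream window. A small stdlib-only sketch of that bookkeeping, with illustrative names:

package main

import "fmt"

// paddingCredit returns how many flow-control bytes to hand back to the
// stream when a padded DATA frame arrives: the frame header length covers
// payload plus padding, but only the payload reaches the caller.
func paddingCredit(frameLength uint32, payload []byte) uint32 {
	if frameLength <= uint32(len(payload)) {
		return 0
	}
	return frameLength - uint32(len(payload))
}

func main() {
	payload := make([]byte, 1000)
	// A 1024-byte padded frame carrying 1000 payload bytes: the 24-byte
	// difference goes back to the stream window.
	fmt.Println(paddingCredit(1024, payload))
}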
@@ -451,6 +555,11 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { t.controlBuf.put(&settings{ack: true, ss: ss}) } +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { // Do nothing. return @@ -458,6 +567,38 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { pingAck := &ping{ack: true} copy(pingAck.data[:], f.Data[:]) t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. + if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after atleast defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings")}) + } } func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -476,6 +617,13 @@ func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) e first := true endHeaders := false var err error + defer func() { + if err == nil { + // Reset ping strikes when seding headers since that might cause the + // peer to send ping. + atomic.StoreUint32(&t.resetPingStrikes, 1) + } + }() // Sends the headers in a single batch. for !endHeaders { size := t.hBuf.Len() @@ -530,13 +678,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { if s.sendCompress != "" { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } - for k, v := range md { + for k, vv := range md { if isReservedHeader(k) { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. continue } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + for _, v := range vv { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } bufLen := t.hBuf.Len() @@ -557,7 +705,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. 
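handlePing above enforces the keepalive policy: pings that arrive too soon (relative to kep.MinTime while streams are active, or a two-hour default without them) accumulate strikes, and once the strike count exceeds maxPingStrikes the server answers with an ENHANCE_YOUR_CALM GOAWAY carrying "too_many_pings". A rough stdlib-only sketch of that counter, with illustrative names and no locking or strike reset:

package main

import (
	"fmt"
	"time"
)

const maxPingStrikes = 2

// pingPolicy tracks how often a client may ping while streams are active.
type pingPolicy struct {
	minTime     time.Duration // minimum allowed interval between pings
	lastPingAt  time.Time
	pingStrikes uint8
}

// onPing records a ping at time now and reports whether the connection
// should be closed with a "too_many_pings" GOAWAY.
func (p *pingPolicy) onPing(now time.Time) bool {
	defer func() { p.lastPingAt = now }()
	if p.lastPingAt.Add(p.minTime).After(now) {
		p.pingStrikes++
	}
	return p.pingStrikes > maxPingStrikes
}

func main() {
	p := &pingPolicy{minTime: 5 * time.Minute}
	base := time.Now()
	for i := 0; i < 4; i++ {
		// Four pings one second apart: the fourth one crosses the limit.
		if p.onPing(base.Add(time.Duration(i) * time.Second)) {
			fmt.Println("send GOAWAY(ENHANCE_YOUR_CALM, \"too_many_pings\")")
		}
	}
}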
-func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { var headersSent, hasHeader bool s.mu.Lock() if s.state == streamDone { @@ -588,17 +736,28 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s t.hEnc.WriteField( hpack.HeaderField{ Name: "grpc-status", - Value: strconv.Itoa(int(statusCode)), + Value: strconv.Itoa(int(st.Code())), }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } + // Attach the trailer metadata. - for k, v := range s.trailer { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + for _, v := range vv { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } bufLen := t.hBuf.Len() @@ -619,7 +778,7 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { // TODO(zhaoq): Support multi-writers for a single stream. var writeHeaderFrame bool s.mu.Lock() @@ -634,6 +793,13 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { if writeHeaderFrame { t.WriteHeader(s, nil) } + defer func() { + if err == nil { + // Reset ping strikes when sending data since this might cause + // the peer to send ping. + atomic.StoreUint32(&t.resetPingStrikes, 1) + } + }() r := bytes.NewBuffer(data) for { if r.Len() == 0 { @@ -715,7 +881,7 @@ func (t *http2Server) applySettings(ss []http2.Setting) { t.mu.Lock() defer t.mu.Unlock() for _, stream := range t.activeStreams { - stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) + stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota)) } t.streamSendQuota = s.Val } @@ -723,6 +889,91 @@ func (t *http2Server) applySettings(ss []http2.Setting) { } } +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-resposive connection +// after an additional duration of keepalive.Timeout. 
+func (t *http2Server) keepalive() { + p := &ping{} + var pingSent bool + maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) + maxAge := time.NewTimer(t.kp.MaxConnectionAge) + keepalive := time.NewTimer(t.kp.Time) + // NOTE: All exit paths of this function should reset their + // respecitve timers. A failure to do so will cause the + // following clean-up to deadlock and eventually leak. + defer func() { + if !maxIdle.Stop() { + <-maxIdle.C + } + if !maxAge.Stop() { + <-maxAge.C + } + if !keepalive.Stop() { + <-keepalive.C + } + }() + for { + select { + case <-maxIdle.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + maxIdle.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.state = draining + t.mu.Unlock() + t.Drain() + // Reseting the timer so that the clean-up doesn't deadlock. + maxIdle.Reset(infinity) + return + } + t.mu.Unlock() + maxIdle.Reset(val) + case <-maxAge.C: + t.mu.Lock() + t.state = draining + t.mu.Unlock() + t.Drain() + maxAge.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-maxAge.C: + // Close the connection after grace period. + t.Close() + // Reseting the timer so that the clean-up doesn't deadlock. + maxAge.Reset(infinity) + case <-t.shutdownChan: + } + return + case <-keepalive.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + pingSent = false + keepalive.Reset(t.kp.Time) + continue + } + if pingSent { + t.Close() + // Reseting the timer so that the clean-up doesn't deadlock. + keepalive.Reset(infinity) + return + } + pingSent = true + t.controlBuf.put(p) + keepalive.Reset(t.kp.Timeout) + case <-t.shutdownChan: + return + } + } +} + // controller running in a separate goroutine takes charge of sending control // frames (e.g., window update, reset stream, setting, etc.) to the server. func (t *http2Server) controller() { @@ -734,7 +985,7 @@ func (t *http2Server) controller() { case <-t.writableChan: switch i := i.(type) { case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) + t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment) case *settings: if i.ack { t.framer.writeSettingsAck(true) @@ -754,7 +1005,10 @@ func (t *http2Server) controller() { sid := t.maxStreamID t.state = draining t.mu.Unlock() - t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil) + t.framer.writeGoAway(true, sid, i.code, i.debugData) + if i.code == http2.ErrCodeEnhanceYourCalm { + t.Close() + } case *flushIO: t.framer.flushWrite() case *ping: @@ -804,6 +1058,9 @@ func (t *http2Server) Close() (err error) { func (t *http2Server) closeStream(s *Stream) { t.mu.Lock() delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } if t.state == draining && len(t.activeStreams) == 0 { defer t.Close() } @@ -813,11 +1070,6 @@ func (t *http2Server) closeStream(s *Stream) { // called to interrupt the potential blocking on other goroutines. 
s.cancel() s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if w := t.fc.onRead(q); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } if s.state == streamDone { s.mu.Unlock() return @@ -831,5 +1083,17 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.controlBuf.put(&goAway{}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo}) +} + +var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := rgen.Int63n(2*r) - r + return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index a3c68d4cac..9b31717c41 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -36,24 +36,26 @@ package transport import ( "bufio" "bytes" + "encoding/base64" "fmt" "io" "net" + "net/http" "strconv" "strings" "sync/atomic" "time" + "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) const ( - // The primary user agent - primaryUA = "grpc-go/1.0" // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues @@ -87,18 +89,39 @@ var ( codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } + httpStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } ) // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. type decodeState struct { - err error // first error encountered decoding - encoding string - // statusCode caches the stream status received from the trailer - // the server sent. Client side only. - statusCode codes.Code - statusDesc string + // statusGen caches the stream status received from the trailer the server + // sent. Client side only. Do not access directly. After all trailers are + // parsed, use the status method to retrieve the status. + statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int // Server side only fields. 
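getJitter above spreads MaxConnectionAge by roughly plus or minus 10% so that many connections opened together do not all hit their age limit, and send GOAWAY, at the same instant. A stdlib-only sketch of the same calculation; the "infinity" sentinel for the unset case is stated here as an assumption about how the package represents "no limit":

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// infinity stands in for "no limit configured"; no jitter is applied then.
const infinity = time.Duration(1<<63 - 1)

var rgen = rand.New(rand.NewSource(time.Now().UnixNano()))

// getJitter returns a random offset in roughly [-v/10, +v/10) so that
// connection-age deadlines on a busy server are de-synchronized.
func getJitter(v time.Duration) time.Duration {
	if v == infinity {
		return 0
	}
	r := int64(v / 10)
	return time.Duration(rgen.Int63n(2*r) - r)
}

func main() {
	age := 30 * time.Minute
	fmt.Println("effective MaxConnectionAge:", age+getJitter(age))
}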
timeoutSet bool timeout time.Duration @@ -121,6 +144,7 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", + "grpc-status-details-bin", "te": return true default: @@ -139,12 +163,6 @@ func isWhitelistedPseudoHeader(hdr string) bool { } } -func (d *decodeState) setErr(err error) { - if d.err == nil { - d.err = err - } -} - func validContentType(t string) bool { e := "application/grpc" if !strings.HasPrefix(t, e) { @@ -158,56 +176,135 @@ func validContentType(t string) bool { return true } -func (d *decodeState) processHeaderField(f hpack.HeaderField) { +func (d *decodeState) status() *status.Status { + if d.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) + } + return d.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { + for _, hf := range frame.Fields { + if err := d.processHeaderField(hf); err != nil { + return err + } + } + + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil + } + + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. + if d.httpStatus == nil { + return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + } + + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] + if !ok { + code = codes.Unknown + } + return streamErrorf(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propogated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propogated. 
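The encodeBinHeader/decodeBinHeader helpers above carry arbitrary bytes in "-bin" suffixed metadata keys: values are written as unpadded base64 and, for compatibility, accepted back in either padded or unpadded form, chosen by whether the length is a multiple of four. A stdlib-only sketch of the same round trip, with illustrative function names:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

const binHdrSuffix = "-bin"

// encodeMetadataValue base64-encodes values of "-bin" keys without padding;
// other values pass through untouched.
func encodeMetadataValue(k, v string) string {
	if strings.HasSuffix(k, binHdrSuffix) {
		return base64.RawStdEncoding.EncodeToString([]byte(v))
	}
	return v
}

// decodeMetadataValue reverses the encoding, accepting padded input too.
func decodeMetadataValue(k, v string) (string, error) {
	if !strings.HasSuffix(k, binHdrSuffix) {
		return v, nil
	}
	if len(v)%4 == 0 {
		// Input was padded, or padding was not necessary.
		b, err := base64.StdEncoding.DecodeString(v)
		return string(b), err
	}
	b, err := base64.RawStdEncoding.DecodeString(v)
	return string(b), err
}

func main() {
	wire := encodeMetadataValue("trace-bin", string([]byte{0x01, 0x02, 0xff}))
	fmt.Println("on the wire:", wire)
	back, err := decodeMetadataValue("trace-bin", wire)
	fmt.Printf("decoded: %v %q\n", err, back)
}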
+ code := int(codes.Unknown) + d.rawStatusCode = &code + return nil + +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) error { switch f.Name { case "content-type": if !validContentType(f.Value) { - d.setErr(streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) - return + return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value) } case "grpc-encoding": d.encoding = f.Value case "grpc-status": code, err := strconv.Atoi(f.Value) if err != nil { - d.setErr(streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) - return + return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) } - d.statusCode = codes.Code(code) + d.rawStatusCode = &code case "grpc-message": - d.statusDesc = decodeGrpcMessage(f.Value) + d.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + d.statusGen = status.FromProto(s) case "grpc-timeout": d.timeoutSet = true var err error - d.timeout, err = decodeTimeout(f.Value) - if err != nil { - d.setErr(streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) - return + if d.timeout, err = decodeTimeout(f.Value); err != nil { + return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err) } case ":path": d.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) + } + d.httpStatus = &code default: if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { - if f.Name == "user-agent" { - i := strings.LastIndex(f.Value, " ") - if i == -1 { - // There is no application user agent string being set. - return - } - // Extract the application user agent string. - f.Value = f.Value[:i] - } if d.mdata == nil { d.mdata = make(map[string][]string) } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) + v, err := decodeMetadataHeader(f.Name, f.Value) if err != nil { grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return + return nil } - d.mdata[k] = append(d.mdata[k], v) + d.mdata[f.Name] = append(d.mdata[f.Name], v) } } + return nil } type timeoutUnit uint8 @@ -379,6 +476,9 @@ func newFramer(conn net.Conn) *framer { writer: bufio.NewWriterSize(conn, http2IOBufSize), } f.fr = http2.NewFramer(f.writer, f.reader) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. 
+ f.fr.SetReuseFrames() f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index caee54a801..063e28f0f8 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -45,10 +45,13 @@ import ( "sync" "golang.org/x/net/context" + "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) @@ -102,6 +105,7 @@ func (b *recvBuffer) load() { if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: + b.backlog[0] = nil b.backlog = b.backlog[1:] default: } @@ -168,11 +172,6 @@ type Stream struct { id uint32 // nil for client side Stream. st ServerTransport - // clientStatsCtx keeps the user context for stats handling. - // It's only valid on client side. Server side stats context is same as s.ctx. - // All client side stats collection should use the clientStatsCtx (instead of the stream context) - // so that all the generated stats for a particular RPC can be associated in the processing phase. - clientStatsCtx context.Context // ctx is the associated context of the stream. ctx context.Context // cancel is always nil for client side Stream. @@ -186,14 +185,17 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - dec io.Reader + trReader io.Reader fc *inFlow recvQuota uint32 + + // TODO: Remote this unused variable. // The accumulated inbound quota pending for window update. updateQuota uint32 - // The handler to control the window update procedure for both this - // particular stream and the associated transport. - windowHandler func(int) + + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if need be. + requestRead func(int) sendQuotaPool *quotaPool // Close headerChan to indicate the end of reception of header metadata. @@ -210,9 +212,17 @@ type Stream struct { // true iff headerChan is closed. Used to avoid closing headerChan // multiple times. headerDone bool - // the status received from the server. - statusCode codes.Code - statusDesc string + // the status error received from the server. + status *status.Status + // rstStream indicates whether a RST_STREAM frame needs to be sent + // to the server to signify that this stream is closing. + rstStream bool + // rstError is the error that needs to be sent along with the RST_STREAM frame. + rstError http2.ErrCode + // bytesSent and bytesReceived indicates whether any bytes have been sent or + // received on this stream. + bytesSent bool + bytesReceived bool } // RecvCompress returns the compression algorithm applied to the inbound @@ -240,7 +250,7 @@ func (s *Stream) GoAway() <-chan struct{} { // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no -// header metadata or iii) the stream is cancelled/expired. +// header metadata or iii) the stream is canceled/expired. func (s *Stream) Header() (metadata.MD, error) { select { case <-s.ctx.Done(): @@ -277,14 +287,9 @@ func (s *Stream) Method() string { return s.method } -// StatusCode returns statusCode received from the server. 
-func (s *Stream) StatusCode() codes.Code { - return s.statusCode -} - -// StatusDesc returns statusDesc received from the server. -func (s *Stream) StatusDesc() string { - return s.statusDesc +// Status returns the status received from the server. +func (s *Stream) Status() *status.Status { + return s.status } // SetHeader sets the header metadata. This can be called multiple times. @@ -318,19 +323,66 @@ func (s *Stream) write(m recvMsg) { s.buf.put(&m) } -// Read reads all the data available for this Stream from the transport and +// Read reads all p bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. -func (s *Stream) Read(p []byte) (n int, err error) { - n, err = s.dec.Read(p) +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) if err != nil { + t.er = err return } - s.windowHandler(n) + t.windowHandler(n) return } +// finish sets the stream's state and status, and closes the done channel. +// s.mu must be held by the caller. st must always be non-nil. +func (s *Stream) finish(st *status.Status) { + s.status = st + s.state = streamDone + close(s.done) +} + +// BytesSent indicates whether any bytes have been sent on this stream. +func (s *Stream) BytesSent() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.bytesSent +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.bytesReceived +} + +// GoString is implemented by Stream so context.String() won't +// race when printing %#v. +func (s *Stream) GoString() string { + return fmt.Sprintf("", s, s.method) +} + // The key to save transport.Stream in the context. type streamKey struct{} @@ -358,10 +410,14 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { - MaxStreams uint32 - AuthInfo credentials.AuthInfo - InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -385,8 +441,14 @@ type ConnectOptions struct { PerRPCCredentials []credentials.PerRPCCredentials // TransportCredentials stores the Authenticator required to setup a client connection. TransportCredentials credentials.TransportCredentials + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler + // InitialWindowSize sets the intial window size for a stream. 
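Stream.Read above now tells the transport (via requestRead and adjustWindow) how much the application intends to read before blocking in io.ReadFull, and transportReader remembers the first error so later reads fail fast instead of touching the window handler again. A stdlib-only sketch of that wrapper, with illustrative names rather than the real transportReader type:

package main

import (
	"fmt"
	"io"
	"strings"
)

// windowedReader mirrors the transportReader idea: every successful read
// reports the consumed byte count to a window handler, and the first
// error is remembered so subsequent reads return it immediately.
type windowedReader struct {
	r             io.Reader
	windowHandler func(n int)
	err           error
}

func (w *windowedReader) Read(p []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}
	n, err := w.r.Read(p)
	if err != nil {
		w.err = err
		return n, err
	}
	w.windowHandler(n)
	return n, nil
}

func main() {
	wr := &windowedReader{
		r:             strings.NewReader("hello gRPC"),
		windowHandler: func(n int) { fmt.Printf("window update for %d bytes\n", n) },
	}
	buf := make([]byte, 5)
	if _, err := io.ReadFull(wr, buf); err == nil {
		fmt.Printf("read %q\n", buf)
	}
}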
+ InitialWindowSize int32 + // InitialConnWindowSize sets the intial window size for a connection. + InitialConnWindowSize int32 } // TargetInfo contains the information of the target such as network address and metadata. @@ -430,6 +492,9 @@ type CallHdr struct { // outbound message. SendCompress string + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is // only a hint. The transport may modify the flush decision @@ -469,10 +534,13 @@ type ClientTransport interface { // once the transport is initiated. Error() <-chan struct{} - // GoAway returns a channel that is closed when ClientTranspor + // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received. + GetGoAwayReason() GoAwayReason } // ServerTransport is the common interface for all gRPC server-side transport @@ -492,10 +560,9 @@ type ServerTransport interface { // Write may not be called on all streams. Write(s *Stream, data []byte, opts *Options) error - // WriteStatus sends the status of a stream to the client. - // WriteStatus is the final call made on a stream and always - // occurs. - WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -561,6 +628,8 @@ var ( ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs") ) +// TODO: See if we can replace StreamError with status package errors. + // StreamError is an error that only affects one stream within a connection. type StreamError struct { Code codes.Code @@ -568,18 +637,7 @@ type StreamError struct { } func (e StreamError) Error() string { - return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc) -} - -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { - switch err { - case context.DeadlineExceeded: - return streamErrorf(codes.DeadlineExceeded, "%v", err) - case context.Canceled: - return streamErrorf(codes.Canceled, "%v", err) - } - panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) + return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } // wait blocks until it can receive from ctx.Done, closing, or proceed. @@ -609,3 +667,16 @@ func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <- return i, nil } } + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // Invalid indicates that no GoAway frame is received. + Invalid GoAwayReason = 0 + // NoReason is the default value when GoAway frame is received. + NoReason GoAwayReason = 1 + // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm + // was recieved and that the debug data said "too_many_pings". 
+ TooManyPings GoAwayReason = 2 +) diff --git a/vendor/manifest b/vendor/manifest index d486c8cffb..4bbf5e9f22 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -892,8 +892,8 @@ { "importpath": "github.com/golang/protobuf/proto", "repository": "https://github.com/golang/protobuf", - "vcs": "", - "revision": "5baca1b63153b1a82014546382edbdd302b138b6", + "vcs": "git", + "revision": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55", "branch": "master", "path": "/proto" }, @@ -906,6 +906,15 @@ "path": "/protoc-gen-go/descriptor", "notests": true }, + { + "importpath": "github.com/golang/protobuf/ptypes", + "repository": "https://github.com/golang/protobuf", + "vcs": "git", + "revision": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55", + "branch": "master", + "path": "/ptypes", + "notests": true + }, { "importpath": "github.com/google/cadvisor/info/v1", "repository": "https://github.com/google/cadvisor", @@ -1431,7 +1440,7 @@ "importpath": "github.com/weaveworks/common", "repository": "https://github.com/weaveworks/common", "vcs": "git", - "revision": "2faced4ddea5ec3b1ff8e88ba552714e3088cf41", + "revision": "493a1f760f47ed3b50afd5baabb36589d96017b8", "branch": "master", "notests": true }, @@ -1552,7 +1561,7 @@ "importpath": "golang.org/x/net/http2", "repository": "https://go.googlesource.com/net", "vcs": "git", - "revision": "dd2d9a67c97da0afa00d5726e28086007a0acce5", + "revision": "59a0b19b5533c7977ddeb86b017bf507ed407b12", "branch": "master", "path": "http2", "notests": true @@ -1694,11 +1703,20 @@ "branch": "master", "notests": true }, + { + "importpath": "google.golang.org/genproto/googleapis/rpc/status", + "repository": "https://github.com/google/go-genproto", + "vcs": "git", + "revision": "aa2eb687b4d3e17154372564ad8d6bf11c3cf21f", + "branch": "master", + "path": "/googleapis/rpc/status", + "notests": true + }, { "importpath": "google.golang.org/grpc", "repository": "https://github.com/grpc/grpc-go", "vcs": "git", - "revision": "34384f34de585705f1a6783a158d2ec8af29f618", + "revision": "06c984861f3044e61f84671f9fe660da0fdf4997", "branch": "master", "notests": true },