diff --git a/config/config.go b/config/config.go index 85f8780dfe..09d341c0e4 100644 --- a/config/config.go +++ b/config/config.go @@ -25,6 +25,10 @@ type Config interface { // peer traffic GetPeerListenAddr() (string, error) + // GetGRPCListenAddr returns the address and port on which to listen for + // incoming events over gRPC + GetGRPCListenAddr() (string, error) + // GetAPIKeys returns a list of Honeycomb API keys GetAPIKeys() ([]string, error) diff --git a/config/file_config.go b/config/file_config.go index 48e55e0349..dc18a6fce5 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -53,8 +53,9 @@ func (r *RulesBasedSamplerConfig) String() string { } type configContents struct { - ListenAddr string `validate:"required"` - PeerListenAddr string `validate:"required"` + ListenAddr string `validate:"required"` + PeerListenAddr string `validate:"required"` + GRPCListenAddr string APIKeys []string `validate:"required"` HoneycombAPI string `validate:"required,url"` Logger string `validate:"required,oneof= logrus honeycomb"` @@ -379,6 +380,20 @@ func (f *fileConfig) GetPeerListenAddr() (string, error) { return f.conf.PeerListenAddr, nil } +func (f *fileConfig) GetGRPCListenAddr() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + // GRPC listen addr is optional, only check value is valid if not empty + if f.conf.GRPCListenAddr != "" { + _, _, err := net.SplitHostPort(f.conf.GRPCListenAddr) + if err != nil { + return "", err + } + } + return f.conf.GRPCListenAddr, nil +} + func (f *fileConfig) GetAPIKeys() ([]string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index 16f2e3b47b..16a41747db 100644 --- a/config/mock.go +++ b/config/mock.go @@ -22,6 +22,8 @@ type MockConfig struct { GetListenAddrVal string GetPeerListenAddrErr error GetPeerListenAddrVal string + GetGRPCListenAddrErr error + GetGRPCListenAddrVal string GetLoggerTypeErr error GetLoggerTypeVal string GetHoneycombLoggerConfigErr error @@ -114,6 +116,12 @@ func (m *MockConfig) GetPeerListenAddr() (string, error) { return m.GetPeerListenAddrVal, m.GetPeerListenAddrErr } +func (m *MockConfig) GetGRPCListenAddr() (string, error) { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.GetGRPCListenAddrVal, m.GetGRPCListenAddrErr +} func (m *MockConfig) GetLoggerType() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index e965ce9a77..7ff5ae0f69 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -9,6 +9,13 @@ # Not eligible for live reload. ListenAddr = "0.0.0.0:8080" +# GRPCListenAddr is the IP and port on which to listen for incoming events over +# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put +# something like nginx in front to do the decryption. +# Should be of the form 0.0.0.0:9090 +# Not eligible for live reload. +GRPCListenAddr = "0.0.0.0:9090" + # PeerListenAddr is the IP and port on which to listen for traffic being # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL # put something like nginx in front to do the decryption. 
Must be different from diff --git a/go.mod b/go.mod index 276bd77a6b..e0f94849c6 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,10 @@ require ( github.com/garyburd/redigo v1.6.0 github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible + github.com/gogo/protobuf v1.3.1 + github.com/golang/protobuf v1.4.3 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d + github.com/grpc-ecosystem/grpc-gateway v1.12.1 github.com/hashicorp/golang-lru v0.5.1 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/libhoney-go v1.12.4 @@ -35,6 +38,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6 // indirect golang.org/x/text v0.3.3 // indirect + google.golang.org/grpc v1.32.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.57.0 // indirect diff --git a/go.sum b/go.sum index aed211cfef..40ad21cbe9 100644 --- a/go.sum +++ b/go.sum @@ -20,6 +20,7 @@ github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -29,8 +30,10 @@ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -41,6 +44,9 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane 
v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= @@ -80,6 +86,8 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -90,13 +98,23 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
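The config changes above add an optional `GRPCListenAddr` (validated with `net.SplitHostPort` only when non-empty), and the go.mod changes pull in `google.golang.org/grpc`. The sketch below is not code from this PR; it is a minimal, hypothetical illustration of how a caller might consume the new getter, starting a gRPC listener only when the address is configured. The `config.Config` interface and `GetGRPCListenAddr` come from the diff; the package name, `startGRPC` function, and the wiring itself are assumptions.

```go
// Hypothetical wiring sketch, not part of this diff: start a gRPC server
// only when the optional GRPCListenAddr is set in the config.
package sketch

import (
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/honeycombio/refinery/config"
)

func startGRPC(cfg config.Config) error {
	addr, err := cfg.GetGRPCListenAddr() // empty string means gRPC ingest is disabled
	if err != nil {
		return err
	}
	if addr == "" {
		return nil
	}
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	srv := grpc.NewServer()
	// The generated OTLP collector services added later in this diff
	// (e.g. RegisterLogsServiceServer, RegisterMetricsServiceServer) would
	// be registered on srv before serving.
	go func() {
		if err := srv.Serve(lis); err != nil {
			log.Printf("grpc server exited: %v", err)
		}
	}()
	return nil
}
```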
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -111,6 +129,8 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -148,6 +168,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= @@ -204,6 +225,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -215,6 +238,7 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -312,6 +336,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0O golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -353,6 +378,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -361,6 +387,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -369,6 +396,7 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -388,10 +416,24 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn 
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= @@ -410,11 +452,13 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/internal/opentelemetry-proto-gen/README.md b/internal/opentelemetry-proto-gen/README.md new file mode 100644 index 0000000000..5cfc56f03c --- /dev/null +++ b/internal/opentelemetry-proto-gen/README.md @@ -0,0 +1,3 @@ +# OTLP Protobuf Definitions + +The definitions can be found [here](https://github.com/open-telemetry/opentelemetry-proto/tree/59c488bfb8fb6d0458ad6425758b70259ff4a2bd). \ No newline at end of file diff --git a/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go new file mode 100644 index 0000000000..0ee0db23ce --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/logs/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportLogsServiceRequest struct { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. 
+ ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} } +func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportLogsServiceRequest) ProtoMessage() {} +func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8e3bf87aaa43acd4, []int{0} +} +func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportLogsServiceRequest.Unmarshal(m, b) +} +func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src) +} +func (m *ExportLogsServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportLogsServiceRequest.Size(m) +} +func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo + +func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs { + if m != nil { + return m.ResourceLogs + } + return nil +} + +type ExportLogsServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} } +func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportLogsServiceResponse) ProtoMessage() {} +func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8e3bf87aaa43acd4, []int{1} +} +func (m *ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportLogsServiceResponse.Unmarshal(m, b) +} +func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src) +} +func (m *ExportLogsServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportLogsServiceResponse.Size(m) +} +func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportLogsServiceRequest)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest") + proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4) +} + +var fileDescriptor_8e3bf87aaa43acd4 = []byte{ + // 263 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xc8, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0xcf, 0xc9, 0x4f, 0x2f, 0xd6, 0x2f, + 0x33, 
0x04, 0xd3, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0x45, 0x42, 0xaa, + 0x28, 0x3a, 0x21, 0x82, 0x7a, 0x70, 0x9d, 0x7a, 0x20, 0x1d, 0x7a, 0x65, 0x86, 0x52, 0x6a, 0xd8, + 0x2c, 0x40, 0x36, 0x16, 0xa2, 0x53, 0x29, 0x8b, 0x4b, 0xc2, 0xb5, 0xa2, 0x20, 0xbf, 0xa8, 0xc4, + 0x27, 0x3f, 0xbd, 0x38, 0x18, 0x62, 0x53, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x90, 0x1f, + 0x17, 0x6f, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x51, 0x72, 0x6a, 0x3c, 0x48, 0x8b, 0x04, 0xa3, 0x02, + 0xb3, 0x06, 0xb7, 0x91, 0xa6, 0x1e, 0x36, 0x27, 0x40, 0x2d, 0xd6, 0x0b, 0x82, 0xea, 0x00, 0x99, + 0x17, 0xc4, 0x53, 0x84, 0xc4, 0x53, 0x92, 0xe6, 0x92, 0xc4, 0x62, 0x57, 0x71, 0x41, 0x7e, 0x5e, + 0x71, 0xaa, 0xd1, 0x5c, 0x46, 0x2e, 0x6e, 0x24, 0x71, 0xa1, 0x5e, 0x46, 0x2e, 0x36, 0x88, 0x6a, + 0x21, 0x7b, 0x3d, 0xa2, 0xfc, 0xac, 0x87, 0xcb, 0x23, 0x52, 0x0e, 0xe4, 0x1b, 0x00, 0x71, 0x9d, + 0x12, 0x83, 0x53, 0x1b, 0x23, 0x97, 0x46, 0x66, 0x3e, 0x71, 0x06, 0x39, 0x09, 0x20, 0x99, 0x11, + 0x00, 0x52, 0x13, 0xc0, 0x18, 0xe5, 0x96, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, + 0xab, 0x0f, 0x32, 0x45, 0x17, 0x11, 0x3b, 0x28, 0x86, 0xea, 0x42, 0xe2, 0x2a, 0x3d, 0x35, 0x4f, + 0x3f, 0x1d, 0x4b, 0x9a, 0x48, 0x62, 0x03, 0xcb, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x49, + 0xa7, 0x2f, 0x4a, 0x43, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LogsServiceClient is the client API for LogsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LogsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) +} + +type logsServiceClient struct { + cc *grpc.ClientConn +} + +func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient { + return &logsServiceClient{cc} +} + +func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) { + out := new(ExportLogsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LogsServiceServer is the server API for LogsService service. +type LogsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) +} + +// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedLogsServiceServer struct { +} + +func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) { + s.RegisterService(&_LogsService_serviceDesc, srv) +} + +func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportLogsServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LogsServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LogsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService", + HandlerType: (*LogsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _LogsService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto", +} diff --git a/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go new file mode 100644 index 0000000000..8003733add --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client LogsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportLogsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server LogsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportLogsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterLogsServiceHandlerServer registers the http handlers for service LogsService to "mux". +// UnaryRPC :call LogsServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterLogsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LogsServiceServer) error { + + mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_LogsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterLogsServiceHandlerFromEndpoint is same as RegisterLogsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
+func RegisterLogsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterLogsServiceHandler(ctx, mux, conn) +} + +// RegisterLogsServiceHandler registers the http handlers for service LogsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterLogsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterLogsServiceHandlerClient(ctx, mux, NewLogsServiceClient(conn)) +} + +// RegisterLogsServiceHandlerClient registers the http handlers for service LogsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LogsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LogsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LogsServiceClient" to call the correct interceptors. +func RegisterLogsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LogsServiceClient) error { + + mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_LogsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_LogsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "logs"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_LogsService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go new file mode 100644 index 0000000000..2fe6fe69b0 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/metrics/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportMetricsServiceRequest struct { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } +func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceRequest) ProtoMessage() {} +func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_75fb6015e6e64798, []int{0} +} +func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) +} +func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) +} +func (m *ExportMetricsServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) +} +func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo + +func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics { + if m != nil { + return m.ResourceMetrics + } + return nil +} + +type ExportMetricsServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } +func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceResponse) ProtoMessage() {} +func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_75fb6015e6e64798, []int{1} +} +func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) +} +func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) +} +func (m *ExportMetricsServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) +} +func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest") + proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798) +} + +var fileDescriptor_75fb6015e6e64798 = []byte{ + // 264 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xcb, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0x07, 0x89, 0x66, 0x26, 0x17, 0xeb, + 0x97, 0x19, 0xc2, 0x98, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0xa5, 0x42, + 0x1a, 0x28, 0xfa, 0x21, 0x82, 0x7a, 0x70, 0xfd, 0x7a, 0x50, 0x4d, 0x7a, 0x65, 0x86, 0x52, 0x3a, + 0xd8, 0x6c, 0xc2, 0x34, 0x1f, 0x62, 0x84, 0x52, 0x25, 0x97, 0xb4, 0x6b, 0x45, 0x41, 0x7e, 0x51, + 0x89, 0x2f, 0x44, 0x38, 0x18, 0x62, 0x6b, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x50, 0x14, + 0x97, 0x40, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x51, 0x72, 0x6a, 0x3c, 0x54, 0xa3, 0x04, 0xa3, 0x02, + 0xb3, 0x06, 0xb7, 0x91, 0xbe, 0x1e, 0x36, 0x17, 0x21, 0xdc, 0xa1, 0x17, 0x04, 0xd5, 0x07, 0x35, + 0x38, 0x88, 0xbf, 0x08, 0x55, 0x40, 0x49, 0x8e, 0x4b, 0x06, 0xbb, 0xd5, 0xc5, 0x05, 0xf9, 0x79, + 0xc5, 0xa9, 0x46, 0x6b, 0x18, 0xb9, 0xf8, 0x50, 0xa5, 0x84, 0x66, 0x32, 0x72, 0xb1, 0x41, 0xf4, + 0x08, 0xb9, 0xea, 0x11, 0x1b, 0x22, 0x7a, 0x78, 0x3c, 0x28, 0xe5, 0x46, 0xa9, 0x31, 0x10, 0xc7, + 0x2a, 0x31, 0x38, 0xf5, 0x33, 0x72, 0x69, 0x67, 0xe6, 0x13, 0x6d, 0x9c, 0x93, 0x30, 0xaa, 0x49, + 0x01, 0x20, 0x95, 0x01, 0x8c, 0x51, 0x9e, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, + 0xb9, 0xfa, 0x20, 0xb3, 0x74, 0x11, 0x51, 0x89, 0x62, 0xb4, 0x2e, 0x24, 0x62, 0xd3, 0x53, 0xf3, + 0xf4, 0xd3, 0xb1, 0xa7, 0xa4, 0x24, 0x36, 0xb0, 0x12, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xaa, 0xdd, 0xdf, 0x49, 0x7c, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) { + out := new(ExportMetricsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) +} + +// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetricsServiceServer struct { +} + +func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportMetricsServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _MetricsService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto", +} diff --git a/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go new file mode 100644 index 0000000000..8158c98a62 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportMetricsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportMetricsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux". +// UnaryRPC :call MetricsServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_MetricsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
+func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMetricsServiceHandler(ctx, mux, conn) +} + +// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn)) +} + +// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MetricsServiceClient" to call the correct interceptors. +func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_MetricsService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go new file mode 100644 index 0000000000..aa4bfb6b00 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/trace/v1/trace_config.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// How spans should be sampled: +// - Always off +// - Always on +// - Always follow the parent Span's decision (off if no parent). +type ConstantSampler_ConstantDecision int32 + +const ( + ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 + ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 + ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 +) + +var ConstantSampler_ConstantDecision_name = map[int32]string{ + 0: "ALWAYS_OFF", + 1: "ALWAYS_ON", + 2: "ALWAYS_PARENT", +} + +var ConstantSampler_ConstantDecision_value = map[string]int32{ + "ALWAYS_OFF": 0, + "ALWAYS_ON": 1, + "ALWAYS_PARENT": 2, +} + +func (x ConstantSampler_ConstantDecision) String() string { + return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) +} + +func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{1, 0} +} + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +type TraceConfig struct { + // The global default sampler used to make decisions on span sampling. + // + // Types that are valid to be assigned to Sampler: + // *TraceConfig_ConstantSampler + // *TraceConfig_TraceIdRatioBased + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. + MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. + MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` + // The global default max number of attributes per timed event. + MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` + // The global default max number of link entries per span. + MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + // The global default max number of attributes per span. 
+ MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceConfig) Reset() { *m = TraceConfig{} } +func (m *TraceConfig) String() string { return proto.CompactTextString(m) } +func (*TraceConfig) ProtoMessage() {} +func (*TraceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{0} +} +func (m *TraceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceConfig.Unmarshal(m, b) +} +func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) +} +func (m *TraceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceConfig.Merge(m, src) +} +func (m *TraceConfig) XXX_Size() int { + return xxx_messageInfo_TraceConfig.Size(m) +} +func (m *TraceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TraceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceConfig proto.InternalMessageInfo + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof" json:"constant_sampler,omitempty"` +} +type TraceConfig_TraceIdRatioBased struct { + TraceIdRatioBased *TraceIdRatioBased `protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof" json:"trace_id_ratio_based,omitempty"` +} +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof" json:"rate_limiting_sampler,omitempty"` +} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} +func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (m *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (m *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { + if x, ok := m.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { + return x.TraceIdRatioBased + } + return nil +} + +func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { + if m != nil { + return m.MaxNumberOfAttributes + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfTimedEvents() int64 { + if m != nil { + return m.MaxNumberOfTimedEvents + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { + if m != nil { + return m.MaxNumberOfAttributesPerTimedEvent + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfLinks() int64 { + if m != nil { + return m.MaxNumberOfLinks + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { + if m != nil { + return m.MaxNumberOfAttributesPerLink + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*TraceConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_TraceIdRatioBased)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } +} + +// Sampler that always makes a constant decision on span sampling. +type ConstantSampler struct { + Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } +func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } +func (*ConstantSampler) ProtoMessage() {} +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{1} +} +func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) +} +func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) +} +func (m *ConstantSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConstantSampler.Merge(m, src) +} +func (m *ConstantSampler) XXX_Size() int { + return xxx_messageInfo_ConstantSampler.Size(m) +} +func (m *ConstantSampler) XXX_DiscardUnknown() { + xxx_messageInfo_ConstantSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo + +func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { + if m != nil { + return m.Decision + } + return ConstantSampler_ALWAYS_OFF +} + +// Sampler that tries to uniformly sample traces with a given ratio. +// The ratio of sampling a trace is equal to that of the specified ratio. +type TraceIdRatioBased struct { + // The desired ratio of sampling. Must be within [0.0, 1.0]. + SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceIdRatioBased) Reset() { *m = TraceIdRatioBased{} } +func (m *TraceIdRatioBased) String() string { return proto.CompactTextString(m) } +func (*TraceIdRatioBased) ProtoMessage() {} +func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{2} +} +func (m *TraceIdRatioBased) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceIdRatioBased.Unmarshal(m, b) +} +func (m *TraceIdRatioBased) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceIdRatioBased.Marshal(b, m, deterministic) +} +func (m *TraceIdRatioBased) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceIdRatioBased.Merge(m, src) +} +func (m *TraceIdRatioBased) XXX_Size() int { + return xxx_messageInfo_TraceIdRatioBased.Size(m) +} +func (m *TraceIdRatioBased) XXX_DiscardUnknown() { + xxx_messageInfo_TraceIdRatioBased.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceIdRatioBased proto.InternalMessageInfo + +func (m *TraceIdRatioBased) GetSamplingRatio() float64 { + if m != nil { + return m.SamplingRatio + } + return 0 +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + // Rate per second. 
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } +func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } +func (*RateLimitingSampler) ProtoMessage() {} +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{3} +} +func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) +} +func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) +} +func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitingSampler.Merge(m, src) +} +func (m *RateLimitingSampler) XXX_Size() int { + return xxx_messageInfo_RateLimitingSampler.Size(m) +} +func (m *RateLimitingSampler) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo + +func (m *RateLimitingSampler) GetQps() int64 { + if m != nil { + return m.Qps + } + return 0 +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) + proto.RegisterType((*TraceConfig)(nil), "opentelemetry.proto.trace.v1.TraceConfig") + proto.RegisterType((*ConstantSampler)(nil), "opentelemetry.proto.trace.v1.ConstantSampler") + proto.RegisterType((*TraceIdRatioBased)(nil), "opentelemetry.proto.trace.v1.TraceIdRatioBased") + proto.RegisterType((*RateLimitingSampler)(nil), "opentelemetry.proto.trace.v1.RateLimitingSampler") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/trace/v1/trace_config.proto", fileDescriptor_5936aa8fa6443e6f) +} + +var fileDescriptor_5936aa8fa6443e6f = []byte{ + // 519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4d, 0x6b, 0xdb, 0x40, + 0x10, 0x86, 0xad, 0xb8, 0xf9, 0x9a, 0xe0, 0x44, 0x5e, 0x37, 0x45, 0x94, 0x40, 0x53, 0x51, 0xa8, + 0x2f, 0x96, 0x70, 0x7a, 0x28, 0xed, 0xa1, 0x60, 0xe7, 0xb3, 0x60, 0x1c, 0xa3, 0x18, 0x4a, 0x7d, + 0x59, 0x56, 0xf2, 0x5a, 0x5d, 0x2a, 0xed, 0xba, 0xab, 0xb5, 0x49, 0x2f, 0x3d, 0xf5, 0x1f, 0xf5, + 0x0f, 0x16, 0xad, 0x54, 0xd9, 0x72, 0x12, 0x41, 0x6f, 0x9a, 0x79, 0xf7, 0x7d, 0x66, 0x06, 0x8f, + 0x07, 0x5c, 0x31, 0xa7, 0x5c, 0xd1, 0x88, 0xc6, 0x54, 0xc9, 0x9f, 0xee, 0x5c, 0x0a, 0x25, 0x5c, + 0x25, 0x49, 0x40, 0xdd, 0x65, 0x37, 0xfb, 0xc0, 0x81, 0xe0, 0x33, 0x16, 0x3a, 0x5a, 0x43, 0x27, + 0x25, 0x43, 0x96, 0x74, 0xf4, 0x3b, 0x67, 0xd9, 0xb5, 0x7f, 0x6f, 0xc3, 0xc1, 0x38, 0x0d, 0xce, + 0xb5, 0x07, 0x4d, 0xc0, 0x0c, 0x04, 0x4f, 0x14, 0xe1, 0x0a, 0x27, 0x24, 0x9e, 0x47, 0x54, 0x5a, + 0xc6, 0xa9, 0xd1, 0x3e, 0x38, 0xeb, 0x38, 0x55, 0x20, 0xe7, 0x3c, 0x77, 0xdd, 0x65, 0xa6, 0x9b, + 0x9a, 0x77, 0x14, 0x94, 0x53, 0xc8, 0x87, 0xe7, 0x59, 0x7f, 0x6c, 0x8a, 0x25, 0x51, 0x4c, 0x60, + 0x9f, 0x24, 0x74, 0x6a, 0x6d, 0x69, 0xbe, 0x5b, 0xcd, 0xd7, 0x4d, 0x7e, 0x9e, 0x7a, 0xa9, 0xaf, + 0x9f, 0xda, 0x6e, 0x6a, 0x5e, 0x53, 0x6d, 0x26, 0x51, 0x08, 0xc7, 0x92, 0x28, 0x8a, 0x23, 0x16, + 0x33, 0xc5, 0x78, 0x58, 0x0c, 0x51, 0xd7, 0x45, 0xba, 0xd5, 0x45, 0x3c, 0xa2, 0xe8, 0x20, 0x77, + 0xae, 0x06, 0x69, 0xc9, 0x87, 0x69, 0xf4, 0x1e, 0xac, 0x98, 0xdc, 0x63, 0xbe, 0x88, 
0x7d, 0x2a, + 0xb1, 0x98, 0x61, 0xa2, 0x94, 0x64, 0xfe, 0x42, 0xd1, 0xc4, 0x7a, 0x76, 0x6a, 0xb4, 0xeb, 0xde, + 0x71, 0x4c, 0xee, 0x87, 0x5a, 0xbe, 0x9d, 0xf5, 0x0a, 0x11, 0x7d, 0x84, 0x97, 0x65, 0xa3, 0x62, + 0x31, 0x9d, 0x62, 0xba, 0xa4, 0x5c, 0x25, 0xd6, 0xb6, 0xb6, 0xbe, 0x58, 0xb3, 0x8e, 0x53, 0xf9, + 0x52, 0xab, 0x68, 0x0c, 0xed, 0xa7, 0x8a, 0xe2, 0x39, 0x95, 0xeb, 0x28, 0x6b, 0x47, 0x93, 0xec, + 0x47, 0x9b, 0x18, 0x51, 0xb9, 0xc2, 0xa2, 0x0e, 0xb4, 0xca, 0xd4, 0x88, 0xf1, 0xef, 0x89, 0xb5, + 0xab, 0x01, 0xe6, 0x1a, 0x60, 0x90, 0xe6, 0xd1, 0x35, 0xbc, 0xae, 0x6c, 0x22, 0x75, 0x5b, 0x7b, + 0xda, 0x7c, 0xf2, 0x54, 0xf5, 0x94, 0xd4, 0xdf, 0x87, 0xdd, 0xfc, 0xd7, 0xb1, 0xff, 0x18, 0x70, + 0xb4, 0xb1, 0x41, 0x68, 0x02, 0x7b, 0x53, 0x1a, 0xb0, 0x84, 0x09, 0xae, 0x57, 0xf0, 0xf0, 0xec, + 0xd3, 0x7f, 0xad, 0x60, 0x11, 0x5f, 0xe4, 0x14, 0xaf, 0xe0, 0xd9, 0x17, 0x60, 0x6e, 0xaa, 0xe8, + 0x10, 0xa0, 0x37, 0xf8, 0xd2, 0xfb, 0x7a, 0x87, 0x6f, 0xaf, 0xae, 0xcc, 0x1a, 0x6a, 0xc0, 0xfe, + 0xbf, 0x78, 0x68, 0x1a, 0xa8, 0x09, 0x8d, 0x3c, 0x1c, 0xf5, 0xbc, 0xcb, 0xe1, 0xd8, 0xdc, 0xb2, + 0x3f, 0x40, 0xf3, 0xc1, 0x5a, 0xa2, 0x37, 0xd0, 0xd0, 0x53, 0x31, 0x1e, 0xea, 0xac, 0xee, 0xdd, + 0xf0, 0xca, 0x49, 0xfb, 0x2d, 0xb4, 0x1e, 0x59, 0x36, 0x64, 0x42, 0xfd, 0xc7, 0x3c, 0xd1, 0x96, + 0xba, 0x97, 0x7e, 0xf6, 0x7f, 0xc1, 0x2b, 0x26, 0x2a, 0xe7, 0xee, 0x9b, 0x6b, 0x7f, 0xe0, 0x51, + 0x2a, 0x8d, 0x8c, 0xc9, 0x75, 0xc8, 0xd4, 0xb7, 0x85, 0xef, 0x04, 0x22, 0xd6, 0x17, 0xa3, 0xb3, + 0x3a, 0x19, 0x25, 0x56, 0x27, 0x3b, 0x20, 0x21, 0xe5, 0x6e, 0x28, 0xdc, 0x40, 0x44, 0x11, 0x0d, + 0x94, 0x90, 0xc5, 0x45, 0xf1, 0x77, 0xf4, 0x83, 0x77, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x53, + 0xba, 0x65, 0xf8, 0x78, 0x04, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go new file mode 100644 index 0000000000..425527cdb4 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportTraceServiceRequest struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. 
+ ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } +func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceRequest) ProtoMessage() {} +func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_192a962890318cf4, []int{0} +} +func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) +} +func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) +} +func (m *ExportTraceServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceRequest.Size(m) +} +func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo + +func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans { + if m != nil { + return m.ResourceSpans + } + return nil +} + +type ExportTraceServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } +func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceResponse) ProtoMessage() {} +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_192a962890318cf4, []int{1} +} +func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) +} +func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) +} +func (m *ExportTraceServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceResponse.Size(m) +} +func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest") + proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4) +} + +var fileDescriptor_192a962890318cf4 = []byte{ + // 265 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xca, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 
0xd2, 0x2f, 0x29, 0x4a, 0x4c, 0x4e, 0xd5, + 0x2f, 0x33, 0x84, 0x30, 0xe2, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0xf5, 0xc0, 0xca, 0x84, + 0xd4, 0x50, 0xf4, 0x42, 0x04, 0xf5, 0xe0, 0x7a, 0xf5, 0xc0, 0x5a, 0xf4, 0xca, 0x0c, 0xa5, 0x34, + 0xb0, 0xd9, 0x81, 0x6a, 0x32, 0x44, 0xb3, 0x52, 0x3e, 0x97, 0xa4, 0x6b, 0x45, 0x41, 0x7e, 0x51, + 0x49, 0x08, 0x48, 0x30, 0x18, 0x62, 0x5b, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x50, 0x10, + 0x17, 0x5f, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x11, 0xc8, 0x21, 0x05, 0x89, 0x79, 0xc5, 0x12, 0x8c, + 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0xda, 0x7a, 0xd8, 0xdc, 0x01, 0xb3, 0x5d, 0x2f, 0x08, 0xaa, 0x27, + 0x18, 0xa4, 0x25, 0x88, 0xb7, 0x08, 0x99, 0xab, 0x24, 0xc3, 0x25, 0x85, 0xcd, 0xc2, 0xe2, 0x82, + 0xfc, 0xbc, 0xe2, 0x54, 0xa3, 0x45, 0x8c, 0x5c, 0x3c, 0xc8, 0x12, 0x42, 0x13, 0x19, 0xb9, 0xd8, + 0x20, 0xea, 0x85, 0x1c, 0xf5, 0x88, 0xf3, 0xbd, 0x1e, 0x4e, 0x0f, 0x49, 0x39, 0x51, 0x62, 0x04, + 0xc4, 0x89, 0x4a, 0x0c, 0x4e, 0x9d, 0x8c, 0x5c, 0x9a, 0x99, 0xf9, 0x44, 0x1a, 0xe5, 0x24, 0x88, + 0x6c, 0x4a, 0x00, 0x48, 0x55, 0x00, 0x63, 0x94, 0x7b, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, + 0x72, 0x7e, 0xae, 0x3e, 0xc8, 0x1c, 0x5d, 0x44, 0x64, 0xa1, 0x18, 0xab, 0x0b, 0x89, 0xba, 0xf4, + 0xd4, 0x3c, 0xfd, 0x74, 0x6c, 0xa9, 0x24, 0x89, 0x0d, 0xac, 0xc0, 0x18, 0x10, 0x00, 0x00, 0xff, + 0xff, 0xc1, 0x6e, 0x1a, 0x15, 0x56, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { + out := new(ExportTraceServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) +} + +// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedTraceServiceServer struct { +} + +func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportTraceServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _TraceService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", +} diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go new file mode 100644 index 0000000000..1da38f1cd2 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". +// UnaryRPC :call TraceServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
+func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. +func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/opentelemetry-proto-gen/common/v1/common.pb.go b/internal/opentelemetry-proto-gen/common/v1/common.pb.go new file mode 100644 index 0000000000..dd951ce8e0 --- /dev/null +++ b/internal/opentelemetry-proto-gen/common/v1/common.pb.go @@ -0,0 +1,430 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/common/v1/common.proto + +package v1 + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AnyValue struct { + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "null". + // + // Types that are valid to be assigned to Value: + // *AnyValue_StringValue + // *AnyValue_BoolValue + // *AnyValue_IntValue + // *AnyValue_DoubleValue + // *AnyValue_ArrayValue + // *AnyValue_KvlistValue + Value isAnyValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AnyValue) Reset() { *m = AnyValue{} } +func (m *AnyValue) String() string { return proto.CompactTextString(m) } +func (*AnyValue) ProtoMessage() {} +func (*AnyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{0} +} +func (m *AnyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AnyValue.Unmarshal(m, b) +} +func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic) +} +func (m *AnyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AnyValue.Merge(m, src) +} +func (m *AnyValue) XXX_Size() int { + return xxx_messageInfo_AnyValue.Size(m) +} +func (m *AnyValue) XXX_DiscardUnknown() { + xxx_messageInfo_AnyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AnyValue proto.InternalMessageInfo + +type isAnyValue_Value interface { + isAnyValue_Value() +} + +type AnyValue_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` +} +type AnyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` +} +type AnyValue_IntValue struct { + IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` +} +type AnyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` +} +type AnyValue_ArrayValue struct { + ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"` +} +type AnyValue_KvlistValue struct { + KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"` +} + +func (*AnyValue_StringValue) isAnyValue_Value() {} +func (*AnyValue_BoolValue) isAnyValue_Value() {} +func (*AnyValue_IntValue) isAnyValue_Value() {} +func (*AnyValue_DoubleValue) isAnyValue_Value() {} +func (*AnyValue_ArrayValue) isAnyValue_Value() {} +func (*AnyValue_KvlistValue) isAnyValue_Value() {} + +func (m *AnyValue) GetValue() isAnyValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AnyValue) GetStringValue() string { + if x, ok := m.GetValue().(*AnyValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *AnyValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AnyValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *AnyValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AnyValue_IntValue); ok { + return x.IntValue + 
} + return 0 +} + +func (m *AnyValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *AnyValue) GetArrayValue() *ArrayValue { + if x, ok := m.GetValue().(*AnyValue_ArrayValue); ok { + return x.ArrayValue + } + return nil +} + +func (m *AnyValue) GetKvlistValue() *KeyValueList { + if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok { + return x.KvlistValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AnyValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AnyValue_StringValue)(nil), + (*AnyValue_BoolValue)(nil), + (*AnyValue_IntValue)(nil), + (*AnyValue_DoubleValue)(nil), + (*AnyValue_ArrayValue)(nil), + (*AnyValue_KvlistValue)(nil), + } +} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +type ArrayValue struct { + // Array of values. The array may be empty (contain 0 elements). + Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ArrayValue) Reset() { *m = ArrayValue{} } +func (m *ArrayValue) String() string { return proto.CompactTextString(m) } +func (*ArrayValue) ProtoMessage() {} +func (*ArrayValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{1} +} +func (m *ArrayValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ArrayValue.Unmarshal(m, b) +} +func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic) +} +func (m *ArrayValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArrayValue.Merge(m, src) +} +func (m *ArrayValue) XXX_Size() int { + return xxx_messageInfo_ArrayValue.Size(m) +} +func (m *ArrayValue) XXX_DiscardUnknown() { + xxx_messageInfo_ArrayValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ArrayValue proto.InternalMessageInfo + +func (m *ArrayValue) GetValues() []*AnyValue { + if m != nil { + return m.Values + } + return nil +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +type KeyValueList struct { + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). 
+ Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValueList) Reset() { *m = KeyValueList{} } +func (m *KeyValueList) String() string { return proto.CompactTextString(m) } +func (*KeyValueList) ProtoMessage() {} +func (*KeyValueList) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{2} +} +func (m *KeyValueList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyValueList.Unmarshal(m, b) +} +func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic) +} +func (m *KeyValueList) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValueList.Merge(m, src) +} +func (m *KeyValueList) XXX_Size() int { + return xxx_messageInfo_KeyValueList.Size(m) +} +func (m *KeyValueList) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValueList.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValueList proto.InternalMessageInfo + +func (m *KeyValueList) GetValues() []*KeyValue { + if m != nil { + return m.Values + } + return nil +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. +type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{3} +} +func (m *KeyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyValue.Unmarshal(m, b) +} +func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) +} +func (m *KeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValue.Merge(m, src) +} +func (m *KeyValue) XXX_Size() int { + return xxx_messageInfo_KeyValue.Size(m) +} +func (m *KeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValue proto.InternalMessageInfo + +func (m *KeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyValue) GetValue() *AnyValue { + if m != nil { + return m.Value + } + return nil +} + +// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version +// of KeyValue that only supports string values. 
+type StringKeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringKeyValue) Reset() { *m = StringKeyValue{} } +func (m *StringKeyValue) String() string { return proto.CompactTextString(m) } +func (*StringKeyValue) ProtoMessage() {} +func (*StringKeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{4} +} +func (m *StringKeyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringKeyValue.Unmarshal(m, b) +} +func (m *StringKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringKeyValue.Marshal(b, m, deterministic) +} +func (m *StringKeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringKeyValue.Merge(m, src) +} +func (m *StringKeyValue) XXX_Size() int { + return xxx_messageInfo_StringKeyValue.Size(m) +} +func (m *StringKeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringKeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringKeyValue proto.InternalMessageInfo + +func (m *StringKeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *StringKeyValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// InstrumentationLibrary is a message representing the instrumentation library information +// such as the fully qualified name and version. +type InstrumentationLibrary struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstrumentationLibrary) Reset() { *m = InstrumentationLibrary{} } +func (m *InstrumentationLibrary) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibrary) ProtoMessage() {} +func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{5} +} +func (m *InstrumentationLibrary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstrumentationLibrary.Unmarshal(m, b) +} +func (m *InstrumentationLibrary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstrumentationLibrary.Marshal(b, m, deterministic) +} +func (m *InstrumentationLibrary) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibrary.Merge(m, src) +} +func (m *InstrumentationLibrary) XXX_Size() int { + return xxx_messageInfo_InstrumentationLibrary.Size(m) +} +func (m *InstrumentationLibrary) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibrary.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibrary proto.InternalMessageInfo + +func (m *InstrumentationLibrary) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InstrumentationLibrary) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func init() { + proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue") + proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue") + proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList") + proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue") + 
proto.RegisterType((*StringKeyValue)(nil), "opentelemetry.proto.common.v1.StringKeyValue") + proto.RegisterType((*InstrumentationLibrary)(nil), "opentelemetry.proto.common.v1.InstrumentationLibrary") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817) +} + +var fileDescriptor_62ba46dcb97aa817 = []byte{ + // 411 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4b, 0xab, 0xd3, 0x40, + 0x14, 0xce, 0xdc, 0xdc, 0xdb, 0x9b, 0x9c, 0x14, 0x91, 0x41, 0xa4, 0x9b, 0x8b, 0xa1, 0x2e, 0x8c, + 0xca, 0x4d, 0x68, 0xdd, 0xb8, 0x51, 0x69, 0x05, 0x89, 0x58, 0xb1, 0x44, 0x70, 0xa1, 0x0b, 0x49, + 0x74, 0x88, 0x43, 0x93, 0x99, 0x3a, 0x99, 0x04, 0xf2, 0xe3, 0xfc, 0x6f, 0x32, 0x8f, 0xf4, 0xb1, + 0x69, 0xe9, 0xee, 0xcc, 0x97, 0xef, 0x71, 0x4e, 0x66, 0x0e, 0xbc, 0xe0, 0x5b, 0xc2, 0x24, 0xa9, + 0x48, 0x4d, 0xa4, 0xe8, 0x93, 0xad, 0xe0, 0x92, 0x27, 0xbf, 0x78, 0x5d, 0x73, 0x96, 0x74, 0x33, + 0x5b, 0xc5, 0x1a, 0xc6, 0x77, 0x47, 0x5c, 0x03, 0xc6, 0x96, 0xd1, 0xcd, 0xa6, 0xff, 0xae, 0xc0, + 0x5b, 0xb0, 0xfe, 0x5b, 0x5e, 0xb5, 0x04, 0x3f, 0x85, 0x71, 0x23, 0x05, 0x65, 0xe5, 0xcf, 0x4e, + 0x9d, 0x27, 0x28, 0x44, 0x91, 0x9f, 0x3a, 0x59, 0x60, 0x50, 0x43, 0x7a, 0x02, 0x50, 0x70, 0x5e, + 0x59, 0xca, 0x55, 0x88, 0x22, 0x2f, 0x75, 0x32, 0x5f, 0x61, 0x86, 0x70, 0x07, 0x3e, 0x65, 0xd2, + 0x7e, 0x77, 0x43, 0x14, 0xb9, 0xa9, 0x93, 0x79, 0x94, 0xc9, 0x5d, 0xc8, 0x6f, 0xde, 0x16, 0x15, + 0xb1, 0x8c, 0xeb, 0x10, 0x45, 0x48, 0x85, 0x18, 0xd4, 0x90, 0x56, 0x10, 0xe4, 0x42, 0xe4, 0xbd, + 0xe5, 0xdc, 0x84, 0x28, 0x0a, 0xe6, 0xcf, 0xe3, 0x93, 0xb3, 0xc4, 0x0b, 0xa5, 0xd0, 0xfa, 0xd4, + 0xc9, 0x20, 0xdf, 0x9d, 0xf0, 0x1a, 0xc6, 0x9b, 0xae, 0xa2, 0xcd, 0xd0, 0xd4, 0x48, 0xdb, 0xbd, + 0x3c, 0x63, 0xf7, 0x89, 0x18, 0xf9, 0x8a, 0x36, 0x52, 0xf5, 0x67, 0x2c, 0x34, 0xb4, 0xbc, 0x85, + 0x1b, 0x6d, 0x35, 0xfd, 0x0c, 0xb0, 0x8f, 0xc5, 0xef, 0x60, 0xa4, 0xe1, 0x66, 0x82, 0x42, 0x37, + 0x0a, 0xe6, 0xcf, 0xce, 0x75, 0x6c, 0xff, 0x7c, 0x66, 0x65, 0xd3, 0x2f, 0x30, 0x3e, 0x8c, 0xbd, + 0xd8, 0x70, 0x10, 0xef, 0x0c, 0x7f, 0x80, 0x37, 0x60, 0xf8, 0x21, 0xb8, 0x1b, 0xd2, 0x9b, 0x5b, + 0xcd, 0x54, 0x89, 0xdf, 0xd8, 0x31, 0xf4, 0x35, 0x5e, 0xd0, 0xae, 0x1d, 0xfe, 0x35, 0x3c, 0xf8, + 0xaa, 0x5f, 0xc6, 0x89, 0x88, 0x47, 0x87, 0x11, 0xfe, 0xa0, 0xfc, 0x00, 0x8f, 0x3f, 0xb2, 0x46, + 0x8a, 0xb6, 0x26, 0x4c, 0xe6, 0x92, 0x72, 0xb6, 0xa2, 0x85, 0xc8, 0x45, 0x8f, 0x31, 0x5c, 0xb3, + 0xbc, 0xb6, 0x6f, 0x2f, 0xd3, 0x35, 0x9e, 0xc0, 0x6d, 0x47, 0x44, 0x43, 0x39, 0xb3, 0x2e, 0xc3, + 0x71, 0xf9, 0x17, 0x42, 0xca, 0x4f, 0x77, 0xbd, 0x0c, 0xde, 0xeb, 0x72, 0xad, 0xe0, 0x35, 0xfa, + 0xfe, 0xb6, 0xa4, 0xf2, 0x4f, 0x5b, 0x28, 0x42, 0xa2, 0x84, 0xf7, 0xfb, 0x45, 0x3a, 0xf2, 0xb9, + 0x37, 0x6b, 0x55, 0x12, 0x96, 0x94, 0x07, 0xdb, 0x55, 0x8c, 0x34, 0xfe, 0xea, 0x7f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x58, 0xdb, 0x68, 0x5e, 0x85, 0x03, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go b/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go new file mode 100644 index 0000000000..04b6212702 --- /dev/null +++ b/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go @@ -0,0 +1,448 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: opentelemetry/proto/logs/v1/logs.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Possible values for LogRecord.SeverityNumber. +type SeverityNumber int32 + +const ( + // UNSPECIFIED is the default SeverityNumber, it MUST not be used. + SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0 + SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1 + SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2 + SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3 + SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4 + SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5 + SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6 + SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7 + SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8 + SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9 + SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10 + SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11 + SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12 + SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13 + SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14 + SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15 + SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16 + SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17 + SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18 + SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19 + SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20 + SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21 + SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22 + SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23 + SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24 +) + +var SeverityNumber_name = map[int32]string{ + 0: "SEVERITY_NUMBER_UNSPECIFIED", + 1: "SEVERITY_NUMBER_TRACE", + 2: "SEVERITY_NUMBER_TRACE2", + 3: "SEVERITY_NUMBER_TRACE3", + 4: "SEVERITY_NUMBER_TRACE4", + 5: "SEVERITY_NUMBER_DEBUG", + 6: "SEVERITY_NUMBER_DEBUG2", + 7: "SEVERITY_NUMBER_DEBUG3", + 8: "SEVERITY_NUMBER_DEBUG4", + 9: "SEVERITY_NUMBER_INFO", + 10: "SEVERITY_NUMBER_INFO2", + 11: "SEVERITY_NUMBER_INFO3", + 12: "SEVERITY_NUMBER_INFO4", + 13: "SEVERITY_NUMBER_WARN", + 14: "SEVERITY_NUMBER_WARN2", + 15: "SEVERITY_NUMBER_WARN3", + 16: "SEVERITY_NUMBER_WARN4", + 17: "SEVERITY_NUMBER_ERROR", + 18: "SEVERITY_NUMBER_ERROR2", + 19: "SEVERITY_NUMBER_ERROR3", + 20: "SEVERITY_NUMBER_ERROR4", + 21: "SEVERITY_NUMBER_FATAL", + 22: "SEVERITY_NUMBER_FATAL2", + 23: "SEVERITY_NUMBER_FATAL3", + 24: "SEVERITY_NUMBER_FATAL4", +} + +var SeverityNumber_value = map[string]int32{ + "SEVERITY_NUMBER_UNSPECIFIED": 0, + "SEVERITY_NUMBER_TRACE": 1, + "SEVERITY_NUMBER_TRACE2": 2, + "SEVERITY_NUMBER_TRACE3": 3, + "SEVERITY_NUMBER_TRACE4": 4, + "SEVERITY_NUMBER_DEBUG": 5, + "SEVERITY_NUMBER_DEBUG2": 6, + "SEVERITY_NUMBER_DEBUG3": 7, 
+ "SEVERITY_NUMBER_DEBUG4": 8, + "SEVERITY_NUMBER_INFO": 9, + "SEVERITY_NUMBER_INFO2": 10, + "SEVERITY_NUMBER_INFO3": 11, + "SEVERITY_NUMBER_INFO4": 12, + "SEVERITY_NUMBER_WARN": 13, + "SEVERITY_NUMBER_WARN2": 14, + "SEVERITY_NUMBER_WARN3": 15, + "SEVERITY_NUMBER_WARN4": 16, + "SEVERITY_NUMBER_ERROR": 17, + "SEVERITY_NUMBER_ERROR2": 18, + "SEVERITY_NUMBER_ERROR3": 19, + "SEVERITY_NUMBER_ERROR4": 20, + "SEVERITY_NUMBER_FATAL": 21, + "SEVERITY_NUMBER_FATAL2": 22, + "SEVERITY_NUMBER_FATAL3": 23, + "SEVERITY_NUMBER_FATAL4": 24, +} + +func (x SeverityNumber) String() string { + return proto.EnumName(SeverityNumber_name, int32(x)) +} + +func (SeverityNumber) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{0} +} + +// Masks for LogRecord.flags field. +type LogRecordFlags int32 + +const ( + LogRecordFlags_LOG_RECORD_FLAG_UNSPECIFIED LogRecordFlags = 0 + LogRecordFlags_LOG_RECORD_FLAG_TRACE_FLAGS_MASK LogRecordFlags = 255 +) + +var LogRecordFlags_name = map[int32]string{ + 0: "LOG_RECORD_FLAG_UNSPECIFIED", + 255: "LOG_RECORD_FLAG_TRACE_FLAGS_MASK", +} + +var LogRecordFlags_value = map[string]int32{ + "LOG_RECORD_FLAG_UNSPECIFIED": 0, + "LOG_RECORD_FLAG_TRACE_FLAGS_MASK": 255, +} + +func (x LogRecordFlags) String() string { + return proto.EnumName(LogRecordFlags_name, int32(x)) +} + +func (LogRecordFlags) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{1} +} + +// A collection of InstrumentationLibraryLogs from a Resource. +type ResourceLogs struct { + // The resource for the logs in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of InstrumentationLibraryLogs that originate from a resource. + InstrumentationLibraryLogs []*InstrumentationLibraryLogs `protobuf:"bytes,2,rep,name=instrumentation_library_logs,json=instrumentationLibraryLogs,proto3" json:"instrumentation_library_logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceLogs) Reset() { *m = ResourceLogs{} } +func (m *ResourceLogs) String() string { return proto.CompactTextString(m) } +func (*ResourceLogs) ProtoMessage() {} +func (*ResourceLogs) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{0} +} +func (m *ResourceLogs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceLogs.Unmarshal(m, b) +} +func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic) +} +func (m *ResourceLogs) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceLogs.Merge(m, src) +} +func (m *ResourceLogs) XXX_Size() int { + return xxx_messageInfo_ResourceLogs.Size(m) +} +func (m *ResourceLogs) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceLogs.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo + +func (m *ResourceLogs) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *ResourceLogs) GetInstrumentationLibraryLogs() []*InstrumentationLibraryLogs { + if m != nil { + return m.InstrumentationLibraryLogs + } + return nil +} + +// A collection of Logs produced by an InstrumentationLibrary. +type InstrumentationLibraryLogs struct { + // The instrumentation library information for the logs in this message. 
+ // If this field is not set then no library info is known. + InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` + // A list of log records. + Logs []*LogRecord `protobuf:"bytes,2,rep,name=logs,proto3" json:"logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstrumentationLibraryLogs) Reset() { *m = InstrumentationLibraryLogs{} } +func (m *InstrumentationLibraryLogs) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibraryLogs) ProtoMessage() {} +func (*InstrumentationLibraryLogs) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{1} +} +func (m *InstrumentationLibraryLogs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstrumentationLibraryLogs.Unmarshal(m, b) +} +func (m *InstrumentationLibraryLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstrumentationLibraryLogs.Marshal(b, m, deterministic) +} +func (m *InstrumentationLibraryLogs) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibraryLogs.Merge(m, src) +} +func (m *InstrumentationLibraryLogs) XXX_Size() int { + return xxx_messageInfo_InstrumentationLibraryLogs.Size(m) +} +func (m *InstrumentationLibraryLogs) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibraryLogs.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibraryLogs proto.InternalMessageInfo + +func (m *InstrumentationLibraryLogs) GetInstrumentationLibrary() *v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return nil +} + +func (m *InstrumentationLibraryLogs) GetLogs() []*LogRecord { + if m != nil { + return m.Logs + } + return nil +} + +// A log record according to OpenTelemetry Log Data Model: +// https://github.com/open-telemetry/oteps/blob/master/text/logs/0097-log-data-model.md +type LogRecord struct { + // time_unix_nano is the time when the event occurred. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical value of the severity, normalized to values described in Log Data Model. + // [Optional]. + SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"` + // The severity text (also known as log level). The original string representation as + // it is known at the source. [Optional]. + SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"` + // Short event identifier that does not contain varying parts. Name describes + // what happened (e.g. "ProcessStarted"). Recommended to be no longer than 50 + // characters. Not guaranteed to be unique in any way. [Optional]. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // A value containing the body of the log record. Can be for example a human-readable + // string message (including multi-line) describing the event in a free form or it can + // be a structured data composed of arrays and maps of other values. [Optional]. 
+ Body *v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"` + // Additional attributes that describe the specific event occurrence. [Optional]. + Attributes []*v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Flags, a bit field. 8 least significant bits are the trace flags as + // defined in W3C Trace Context specification. 24 most significant bits are reserved + // and must be set to 0. Readers must not assume that 24 most significant bits + // will be zero and must correctly mask the bits when reading 8-bit trace flag (use + // flags & TRACE_FLAGS_MASK). [Optional]. + Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"` + // A unique identifier for a trace. All logs from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. Can be set for logs that are part of request processing + // and have an assigned trace id. [Optional]. + TraceId []byte `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. Can be set for logs that are part of a particular processing span. + // If span_id is present trace_id SHOULD be also present. [Optional]. + SpanId []byte `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogRecord) Reset() { *m = LogRecord{} } +func (m *LogRecord) String() string { return proto.CompactTextString(m) } +func (*LogRecord) ProtoMessage() {} +func (*LogRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{2} +} +func (m *LogRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogRecord.Unmarshal(m, b) +} +func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) +} +func (m *LogRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRecord.Merge(m, src) +} +func (m *LogRecord) XXX_Size() int { + return xxx_messageInfo_LogRecord.Size(m) +} +func (m *LogRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_LogRecord proto.InternalMessageInfo + +func (m *LogRecord) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *LogRecord) GetSeverityNumber() SeverityNumber { + if m != nil { + return m.SeverityNumber + } + return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED +} + +func (m *LogRecord) GetSeverityText() string { + if m != nil { + return m.SeverityText + } + return "" +} + +func (m *LogRecord) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LogRecord) GetBody() *v11.AnyValue { + if m != nil { + return m.Body + } + return nil +} + +func (m *LogRecord) GetAttributes() []*v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *LogRecord) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *LogRecord) GetFlags() uint32 { + if m != nil { + return m.Flags + } + 
return 0 +} + +func (m *LogRecord) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *LogRecord) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value) + proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value) + proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs") + proto.RegisterType((*InstrumentationLibraryLogs)(nil), "opentelemetry.proto.logs.v1.InstrumentationLibraryLogs") + proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e) +} + +var fileDescriptor_d1c030a3ec7e961e = []byte{ + // 756 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x95, 0xdf, 0x6f, 0xea, 0x36, + 0x14, 0xc7, 0x97, 0xf2, 0xdb, 0xa5, 0xd4, 0xf3, 0x5a, 0x9a, 0xd2, 0x69, 0x8d, 0xba, 0xad, 0x63, + 0x9d, 0x0a, 0x6a, 0x60, 0xda, 0xb4, 0xed, 0x25, 0xd0, 0x80, 0x50, 0x29, 0x54, 0x06, 0xba, 0x1f, + 0x2f, 0x51, 0x00, 0x8f, 0x45, 0x03, 0x1b, 0x25, 0x0e, 0x82, 0xbf, 0xef, 0xbe, 0x5c, 0xdd, 0xa7, + 0xfb, 0x1f, 0xdd, 0xab, 0x98, 0x1f, 0x2d, 0x28, 0xa6, 0x4f, 0xd8, 0xe7, 0x73, 0xbe, 0x5f, 0x9f, + 0x73, 0x44, 0x6c, 0x70, 0xcd, 0xa6, 0x84, 0x72, 0x32, 0x26, 0x13, 0xc2, 0xdd, 0x45, 0x71, 0xea, + 0x32, 0xce, 0x8a, 0x63, 0x36, 0xf2, 0x8a, 0xb3, 0x3b, 0xf1, 0x5b, 0x10, 0x21, 0x74, 0xb1, 0x95, + 0xb7, 0x0c, 0x16, 0x04, 0x9f, 0xdd, 0xe5, 0x6e, 0xc2, 0x4c, 0x06, 0x6c, 0x32, 0x61, 0x34, 0xb0, + 0x59, 0xae, 0x96, 0x9a, 0x5c, 0x21, 0x2c, 0xd7, 0x25, 0x1e, 0xf3, 0xdd, 0x01, 0x09, 0xb2, 0xd7, + 0xeb, 0x65, 0xfe, 0xd5, 0x47, 0x05, 0xa4, 0xf1, 0x2a, 0xd4, 0x64, 0x23, 0x0f, 0x99, 0x20, 0xb9, + 0x4e, 0x51, 0x15, 0x4d, 0xc9, 0x1f, 0xea, 0x3f, 0x16, 0xc2, 0x8a, 0xdb, 0xf8, 0xcc, 0xee, 0x0a, + 0x6b, 0x03, 0xbc, 0x91, 0xa2, 0x05, 0xf8, 0xda, 0xa1, 0x1e, 0x77, 0xfd, 0x09, 0xa1, 0xdc, 0xe6, + 0x0e, 0xa3, 0xd6, 0xd8, 0xe9, 0xbb, 0xb6, 0xbb, 0xb0, 0x82, 0xb6, 0xd4, 0x03, 0x2d, 0x92, 0x3f, + 0xd4, 0x7f, 0x29, 0xec, 0xe9, 0xbb, 0xd0, 0xd8, 0x36, 0x68, 0x2e, 0xf5, 0x41, 0x95, 0x38, 0xe7, + 0x48, 0xd9, 0xd5, 0x7b, 0x05, 0xe4, 0xe4, 0x52, 0x44, 0xc1, 0x99, 0xa4, 0xb2, 0x55, 0xbf, 0x3f, + 0x87, 0x16, 0xb5, 0x9a, 0xb2, 0xb4, 0x2c, 0x9c, 0x0d, 0x2f, 0x09, 0xfd, 0x06, 0xa2, 0xaf, 0x3a, + 0xbe, 0xde, 0xdb, 0x71, 0x93, 0x8d, 0x30, 0x19, 0x30, 0x77, 0x88, 0x85, 0xe6, 0xea, 0x43, 0x04, + 0xa4, 0x36, 0x31, 0xf4, 0x1d, 0xc8, 0x70, 0x67, 0x42, 0x2c, 0x9f, 0x3a, 0x73, 0x8b, 0xda, 0x94, + 0x89, 0x82, 0xe3, 0x38, 0x1d, 0x44, 0x7b, 0xd4, 0x99, 0xb7, 0x6c, 0xca, 0x50, 0x17, 0x1c, 0x7b, + 0x64, 0x46, 0x5c, 0x87, 0x2f, 0x2c, 0xea, 0x4f, 0xfa, 0xc4, 0x55, 0x0f, 0x34, 0x25, 0x9f, 0xd1, + 0x7f, 0xda, 0x7b, 0x74, 0x67, 0xa5, 0x69, 0x09, 0x09, 0xce, 0x78, 0x5b, 0x7b, 0xf4, 0x2d, 0x38, + 0xda, 0xb8, 0x72, 0x32, 0xe7, 0x6a, 0x44, 0x53, 0xf2, 0x29, 0x9c, 0x5e, 0x07, 0xbb, 0x64, 0xce, + 0x11, 0x02, 0x51, 0x6a, 0x4f, 0x88, 0x1a, 0x15, 0x4c, 0xac, 0xd1, 0xef, 0x20, 0xda, 0x67, 0xc3, + 0x85, 0x1a, 0x13, 0xb3, 0xfd, 0xe1, 0x8d, 0xd9, 0x1a, 0x74, 0xf1, 0x6c, 0x8f, 0x7d, 0x82, 0x85, + 0x08, 0xd5, 0x01, 0xb0, 0x39, 0x77, 0x9d, 0xbe, 0xcf, 0x89, 0xa7, 0xc6, 0xc5, 0x04, 0xdf, 0xb2, + 0x78, 0x20, 0x2b, 0x8b, 0x57, 0x52, 0xf4, 0x2b, 0x50, 0x87, 0x2e, 0x9b, 0x4e, 0xc9, 0xd0, 0x7a, + 0x89, 0x5a, 0x03, 0xe6, 0x53, 0xae, 0x26, 0x34, 0x25, 
0x7f, 0x84, 0xb3, 0x2b, 0x6e, 0x6c, 0x70, + 0x35, 0xa0, 0xe8, 0x04, 0xc4, 0xfe, 0x1d, 0xdb, 0x23, 0x4f, 0x4d, 0x6a, 0x4a, 0x3e, 0x81, 0x97, + 0x1b, 0x74, 0x0e, 0x92, 0xdc, 0xb5, 0x07, 0xc4, 0x72, 0x86, 0x6a, 0x4a, 0x53, 0xf2, 0x69, 0x9c, + 0x10, 0xfb, 0xc6, 0x10, 0x9d, 0x81, 0x84, 0x37, 0xb5, 0x69, 0x40, 0x80, 0x20, 0xf1, 0x60, 0xdb, + 0x18, 0xde, 0xbc, 0x8b, 0x81, 0xcc, 0xf6, 0x94, 0xd1, 0x25, 0xb8, 0xe8, 0x98, 0xcf, 0x26, 0x6e, + 0x74, 0xff, 0xb6, 0x5a, 0xbd, 0xc7, 0x8a, 0x89, 0xad, 0x5e, 0xab, 0xf3, 0x64, 0x56, 0x1b, 0xb5, + 0x86, 0x79, 0x0f, 0xbf, 0x40, 0xe7, 0xe0, 0x74, 0x37, 0xa1, 0x8b, 0x8d, 0xaa, 0x09, 0x15, 0x94, + 0x03, 0xd9, 0x50, 0xa4, 0xc3, 0x03, 0x29, 0x2b, 0xc1, 0x88, 0x94, 0x95, 0x61, 0x34, 0xec, 0xb8, + 0x7b, 0xb3, 0xd2, 0xab, 0xc3, 0x58, 0x98, 0x4c, 0x20, 0x1d, 0xc6, 0xa5, 0xac, 0x04, 0x13, 0x52, + 0x56, 0x86, 0x49, 0xa4, 0x82, 0x93, 0x5d, 0xd6, 0x68, 0xd5, 0xda, 0x30, 0x15, 0x56, 0x48, 0x40, + 0x74, 0x08, 0x64, 0xa8, 0x04, 0x0f, 0x65, 0xa8, 0x0c, 0xd3, 0x61, 0x47, 0xfd, 0x69, 0xe0, 0x16, + 0x3c, 0x0a, 0x13, 0x05, 0x44, 0x87, 0x19, 0x19, 0x2a, 0xc1, 0x63, 0x19, 0x2a, 0x43, 0x18, 0x86, + 0x4c, 0x8c, 0xdb, 0x18, 0x7e, 0x19, 0x36, 0x0c, 0x81, 0x74, 0x88, 0xa4, 0xac, 0x04, 0xbf, 0x92, + 0xb2, 0x32, 0x3c, 0x09, 0x3b, 0xae, 0x66, 0x74, 0x8d, 0x26, 0x3c, 0x0d, 0x93, 0x09, 0xa4, 0xc3, + 0xac, 0x94, 0x95, 0xe0, 0x99, 0x94, 0x95, 0xa1, 0x7a, 0xf3, 0x17, 0xc8, 0x6c, 0x6e, 0xa4, 0x9a, + 0xf8, 0x16, 0x2e, 0xc1, 0x45, 0xb3, 0x5d, 0xb7, 0xb0, 0x59, 0x6d, 0xe3, 0x7b, 0xab, 0xd6, 0x34, + 0xea, 0x3b, 0x7f, 0xe2, 0xef, 0x81, 0xb6, 0x9b, 0x20, 0xfe, 0x71, 0x62, 0xd9, 0xb1, 0x1e, 0x8d, + 0xce, 0x03, 0xfc, 0xa4, 0x54, 0xfe, 0x07, 0xdf, 0x38, 0x6c, 0xdf, 0x1d, 0x55, 0x09, 0xee, 0x42, + 0xef, 0x29, 0x08, 0x3d, 0x29, 0xff, 0xfc, 0x31, 0x72, 0xf8, 0x7f, 0x7e, 0x3f, 0xf8, 0xf2, 0x8b, + 0x81, 0xe8, 0xf6, 0xe5, 0xd5, 0xdb, 0xf2, 0xb8, 0x5d, 0xbe, 0x81, 0x23, 0x42, 0x8b, 0xa3, 0xcd, + 0xdb, 0xdb, 0x8f, 0x8b, 0x68, 0xe9, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x3d, 0xc3, 0x0c, + 0xa1, 0x07, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go b/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go new file mode 100644 index 0000000000..eebb0aa75e --- /dev/null +++ b/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go @@ -0,0 +1,423 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/metrics/experimental/configservice.proto + +package experimental + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricConfigRequest struct { + // Required. The resource for which configuration should be returned. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // Optional. 
The value of MetricConfigResponse.fingerprint for the last + // configuration that the caller received and successfully applied. + LastKnownFingerprint []byte `protobuf:"bytes,2,opt,name=last_known_fingerprint,json=lastKnownFingerprint,proto3" json:"last_known_fingerprint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricConfigRequest) Reset() { *m = MetricConfigRequest{} } +func (m *MetricConfigRequest) String() string { return proto.CompactTextString(m) } +func (*MetricConfigRequest) ProtoMessage() {} +func (*MetricConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_79b5d4ea55caf90b, []int{0} +} +func (m *MetricConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricConfigRequest.Unmarshal(m, b) +} +func (m *MetricConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricConfigRequest.Marshal(b, m, deterministic) +} +func (m *MetricConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricConfigRequest.Merge(m, src) +} +func (m *MetricConfigRequest) XXX_Size() int { + return xxx_messageInfo_MetricConfigRequest.Size(m) +} +func (m *MetricConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MetricConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricConfigRequest proto.InternalMessageInfo + +func (m *MetricConfigRequest) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *MetricConfigRequest) GetLastKnownFingerprint() []byte { + if m != nil { + return m.LastKnownFingerprint + } + return nil +} + +type MetricConfigResponse struct { + // Optional. The fingerprint associated with this MetricConfigResponse. Each + // change in configs yields a different fingerprint. The resource SHOULD copy + // this value to MetricConfigRequest.last_known_fingerprint for the next + // configuration request. If there are no changes between fingerprint and + // MetricConfigRequest.last_known_fingerprint, then all other fields besides + // fingerprint in the response are optional, or the same as the last update if + // present. + // + // The exact mechanics of generating the fingerprint is up to the + // implementation. However, a fingerprint must be deterministically determined + // by the configurations -- the same configuration will generate the same + // fingerprint on any instance of an implementation. Hence using a timestamp is + // unacceptable, but a deterministic hash is fine. + Fingerprint []byte `protobuf:"bytes,1,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` + // A single metric may match multiple schedules. In such cases, the schedule + // that specifies the smallest period is applied. + // + // Note, for optimization purposes, it is recommended to use as few schedules + // as possible to capture all required metric updates. Where you can be + // conservative, do take full advantage of the inclusion/exclusion patterns to + // capture as much of your targeted metrics. + Schedules []*MetricConfigResponse_Schedule `protobuf:"bytes,2,rep,name=schedules,proto3" json:"schedules,omitempty"` + // Optional. The client is suggested to wait this long (in seconds) before + // pinging the configuration service again. 
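+	// Illustrative sketch, not part of the generated code: a caller holding a
+	// MetricConfigClient might honor this hint roughly as follows (client, ctx, res,
+	// and lastFP are assumed to exist, and the caller imports "time"):
+	//
+	//	resp, err := client.GetMetricConfig(ctx, &MetricConfigRequest{
+	//		Resource:             res,
+	//		LastKnownFingerprint: lastFP,
+	//	})
+	//	if err == nil {
+	//		lastFP = resp.GetFingerprint()
+	//		time.Sleep(time.Duration(resp.GetSuggestedWaitTimeSec()) * time.Second)
+	//	}
+	//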
+ SuggestedWaitTimeSec int32 `protobuf:"varint,3,opt,name=suggested_wait_time_sec,json=suggestedWaitTimeSec,proto3" json:"suggested_wait_time_sec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricConfigResponse) Reset() { *m = MetricConfigResponse{} } +func (m *MetricConfigResponse) String() string { return proto.CompactTextString(m) } +func (*MetricConfigResponse) ProtoMessage() {} +func (*MetricConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_79b5d4ea55caf90b, []int{1} +} +func (m *MetricConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricConfigResponse.Unmarshal(m, b) +} +func (m *MetricConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricConfigResponse.Marshal(b, m, deterministic) +} +func (m *MetricConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricConfigResponse.Merge(m, src) +} +func (m *MetricConfigResponse) XXX_Size() int { + return xxx_messageInfo_MetricConfigResponse.Size(m) +} +func (m *MetricConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MetricConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricConfigResponse proto.InternalMessageInfo + +func (m *MetricConfigResponse) GetFingerprint() []byte { + if m != nil { + return m.Fingerprint + } + return nil +} + +func (m *MetricConfigResponse) GetSchedules() []*MetricConfigResponse_Schedule { + if m != nil { + return m.Schedules + } + return nil +} + +func (m *MetricConfigResponse) GetSuggestedWaitTimeSec() int32 { + if m != nil { + return m.SuggestedWaitTimeSec + } + return 0 +} + +// A Schedule is used to apply a particular scheduling configuration to +// a metric. If a metric name matches a schedule's patterns, then the metric +// adopts the configuration specified by the schedule. +type MetricConfigResponse_Schedule struct { + // Metrics with names that match a rule in the inclusion_patterns are + // targeted by this schedule. Metrics that match the exclusion_patterns + // are not targeted for this schedule, even if they match an inclusion + // pattern. + ExclusionPatterns []*MetricConfigResponse_Schedule_Pattern `protobuf:"bytes,1,rep,name=exclusion_patterns,json=exclusionPatterns,proto3" json:"exclusion_patterns,omitempty"` + InclusionPatterns []*MetricConfigResponse_Schedule_Pattern `protobuf:"bytes,2,rep,name=inclusion_patterns,json=inclusionPatterns,proto3" json:"inclusion_patterns,omitempty"` + // Describes the collection period for each metric in seconds. + // A period of 0 means to not export. 
+ PeriodSec int32 `protobuf:"varint,3,opt,name=period_sec,json=periodSec,proto3" json:"period_sec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricConfigResponse_Schedule) Reset() { *m = MetricConfigResponse_Schedule{} } +func (m *MetricConfigResponse_Schedule) String() string { return proto.CompactTextString(m) } +func (*MetricConfigResponse_Schedule) ProtoMessage() {} +func (*MetricConfigResponse_Schedule) Descriptor() ([]byte, []int) { + return fileDescriptor_79b5d4ea55caf90b, []int{1, 0} +} +func (m *MetricConfigResponse_Schedule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricConfigResponse_Schedule.Unmarshal(m, b) +} +func (m *MetricConfigResponse_Schedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricConfigResponse_Schedule.Marshal(b, m, deterministic) +} +func (m *MetricConfigResponse_Schedule) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricConfigResponse_Schedule.Merge(m, src) +} +func (m *MetricConfigResponse_Schedule) XXX_Size() int { + return xxx_messageInfo_MetricConfigResponse_Schedule.Size(m) +} +func (m *MetricConfigResponse_Schedule) XXX_DiscardUnknown() { + xxx_messageInfo_MetricConfigResponse_Schedule.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricConfigResponse_Schedule proto.InternalMessageInfo + +func (m *MetricConfigResponse_Schedule) GetExclusionPatterns() []*MetricConfigResponse_Schedule_Pattern { + if m != nil { + return m.ExclusionPatterns + } + return nil +} + +func (m *MetricConfigResponse_Schedule) GetInclusionPatterns() []*MetricConfigResponse_Schedule_Pattern { + if m != nil { + return m.InclusionPatterns + } + return nil +} + +func (m *MetricConfigResponse_Schedule) GetPeriodSec() int32 { + if m != nil { + return m.PeriodSec + } + return 0 +} + +// A light-weight pattern that can match 1 or more +// metrics, for which this schedule will apply. The string is used to +// match against metric names. It should not exceed 100k characters. 
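+// Illustrative sketch, not part of the generated code: a schedule meaning "export
+// every metric whose name starts with http. once a minute" could be built roughly
+// like this (the prefix and period values are assumptions):
+//
+//	sched := &MetricConfigResponse_Schedule{
+//		InclusionPatterns: []*MetricConfigResponse_Schedule_Pattern{
+//			{Match: &MetricConfigResponse_Schedule_Pattern_StartsWith{StartsWith: "http."}},
+//		},
+//		PeriodSec: 60,
+//	}
+//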
+type MetricConfigResponse_Schedule_Pattern struct { + // Types that are valid to be assigned to Match: + // *MetricConfigResponse_Schedule_Pattern_Equals + // *MetricConfigResponse_Schedule_Pattern_StartsWith + Match isMetricConfigResponse_Schedule_Pattern_Match `protobuf_oneof:"match"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricConfigResponse_Schedule_Pattern) Reset() { *m = MetricConfigResponse_Schedule_Pattern{} } +func (m *MetricConfigResponse_Schedule_Pattern) String() string { return proto.CompactTextString(m) } +func (*MetricConfigResponse_Schedule_Pattern) ProtoMessage() {} +func (*MetricConfigResponse_Schedule_Pattern) Descriptor() ([]byte, []int) { + return fileDescriptor_79b5d4ea55caf90b, []int{1, 0, 0} +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Unmarshal(m, b) +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Marshal(b, m, deterministic) +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Merge(m, src) +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_Size() int { + return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Size(m) +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_DiscardUnknown() { + xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricConfigResponse_Schedule_Pattern proto.InternalMessageInfo + +type isMetricConfigResponse_Schedule_Pattern_Match interface { + isMetricConfigResponse_Schedule_Pattern_Match() +} + +type MetricConfigResponse_Schedule_Pattern_Equals struct { + Equals string `protobuf:"bytes,1,opt,name=equals,proto3,oneof" json:"equals,omitempty"` +} +type MetricConfigResponse_Schedule_Pattern_StartsWith struct { + StartsWith string `protobuf:"bytes,2,opt,name=starts_with,json=startsWith,proto3,oneof" json:"starts_with,omitempty"` +} + +func (*MetricConfigResponse_Schedule_Pattern_Equals) isMetricConfigResponse_Schedule_Pattern_Match() { +} +func (*MetricConfigResponse_Schedule_Pattern_StartsWith) isMetricConfigResponse_Schedule_Pattern_Match() { +} + +func (m *MetricConfigResponse_Schedule_Pattern) GetMatch() isMetricConfigResponse_Schedule_Pattern_Match { + if m != nil { + return m.Match + } + return nil +} + +func (m *MetricConfigResponse_Schedule_Pattern) GetEquals() string { + if x, ok := m.GetMatch().(*MetricConfigResponse_Schedule_Pattern_Equals); ok { + return x.Equals + } + return "" +} + +func (m *MetricConfigResponse_Schedule_Pattern) GetStartsWith() string { + if x, ok := m.GetMatch().(*MetricConfigResponse_Schedule_Pattern_StartsWith); ok { + return x.StartsWith + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*MetricConfigResponse_Schedule_Pattern) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*MetricConfigResponse_Schedule_Pattern_Equals)(nil), + (*MetricConfigResponse_Schedule_Pattern_StartsWith)(nil), + } +} + +func init() { + proto.RegisterType((*MetricConfigRequest)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigRequest") + proto.RegisterType((*MetricConfigResponse)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse") + proto.RegisterType((*MetricConfigResponse_Schedule)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse.Schedule") + proto.RegisterType((*MetricConfigResponse_Schedule_Pattern)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse.Schedule.Pattern") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/metrics/experimental/configservice.proto", fileDescriptor_79b5d4ea55caf90b) +} + +var fileDescriptor_79b5d4ea55caf90b = []byte{ + // 499 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, + 0x10, 0xc7, 0x9f, 0x4d, 0x9f, 0xbe, 0x64, 0x52, 0x09, 0xb1, 0x44, 0x60, 0x45, 0x42, 0x0a, 0x3d, + 0x05, 0xa1, 0xae, 0xd5, 0x00, 0x37, 0xe0, 0x10, 0x04, 0x05, 0x21, 0xd4, 0xc8, 0x41, 0xaa, 0xc4, + 0xc5, 0x72, 0x9d, 0xa9, 0xbd, 0xc2, 0xde, 0x75, 0x77, 0xc7, 0x49, 0xb9, 0xf0, 0x19, 0x10, 0xe2, + 0x0b, 0xf0, 0x99, 0xf8, 0x36, 0x9c, 0x90, 0x5f, 0xea, 0x38, 0x22, 0x87, 0x8a, 0x97, 0xdb, 0xe4, + 0x3f, 0x33, 0xbf, 0xff, 0x64, 0x6c, 0x0f, 0x3c, 0xd1, 0x19, 0x2a, 0xc2, 0x04, 0x53, 0x24, 0xf3, + 0xd1, 0xcd, 0x8c, 0x26, 0xed, 0x16, 0xb1, 0x0c, 0xad, 0x8b, 0x97, 0x19, 0x1a, 0x99, 0xa2, 0xa2, + 0x20, 0x71, 0x43, 0xad, 0xce, 0x65, 0x64, 0xd1, 0x2c, 0x64, 0x88, 0xa2, 0x2c, 0xe4, 0xa3, 0xb5, + 0xee, 0x4a, 0x14, 0x75, 0xb7, 0x68, 0x77, 0x0f, 0xc4, 0x26, 0x1f, 0x83, 0x56, 0xe7, 0x26, 0x44, + 0x77, 0x71, 0xd4, 0xc4, 0x15, 0xe4, 0xe0, 0x0b, 0x83, 0x5b, 0x6f, 0x4b, 0xd0, 0xf3, 0xd2, 0xd7, + 0xc3, 0x8b, 0x1c, 0x2d, 0xf1, 0x17, 0xb0, 0x77, 0x55, 0xe9, 0xb0, 0x21, 0x1b, 0xf5, 0xc6, 0xf7, + 0xc5, 0xa6, 0x21, 0x1a, 0xdc, 0xe2, 0x48, 0x78, 0x75, 0xec, 0x35, 0xad, 0xfc, 0x11, 0xdc, 0x4e, + 0x02, 0x4b, 0xfe, 0x07, 0xa5, 0x97, 0xca, 0x3f, 0x97, 0x2a, 0x42, 0x93, 0x19, 0xa9, 0xc8, 0xe9, + 0x0c, 0xd9, 0x68, 0xdf, 0xeb, 0x17, 0xd9, 0x37, 0x45, 0xf2, 0xe5, 0x2a, 0x77, 0xf0, 0xfd, 0x7f, + 0xe8, 0xaf, 0x0f, 0x65, 0x33, 0xad, 0x2c, 0xf2, 0x21, 0xf4, 0xda, 0x0c, 0x56, 0x32, 0xda, 0x12, + 0x47, 0xe8, 0xda, 0x30, 0xc6, 0x79, 0x9e, 0xa0, 0x75, 0x3a, 0xc3, 0xad, 0x51, 0x6f, 0x7c, 0x2c, + 0xae, 0xbb, 0x3d, 0xb1, 0xc9, 0x54, 0xcc, 0x6a, 0x9e, 0xb7, 0x22, 0xf3, 0xc7, 0x70, 0xc7, 0xe6, + 0x51, 0x84, 0x96, 0x70, 0xee, 0x2f, 0x03, 0x49, 0x3e, 0xc9, 0x14, 0x7d, 0x8b, 0xa1, 0xb3, 0x35, + 0x64, 0xa3, 0x6d, 0xaf, 0xdf, 0xa4, 0x4f, 0x03, 0x49, 0xef, 0x64, 0x8a, 0x33, 0x0c, 0x07, 0x3f, + 0x3a, 0xb0, 0x77, 0x85, 0xe3, 0x9f, 0x80, 0xe3, 0x65, 0x98, 0xe4, 0x56, 0x6a, 0xe5, 0x67, 0x01, + 0x11, 0x1a, 0x65, 0x1d, 0x56, 0xce, 0x7c, 0xf2, 0x97, 0x66, 0x16, 0xd3, 0x8a, 0xeb, 0xdd, 0x6c, + 0xac, 0x6a, 0xc5, 0x16, 0xfe, 0x52, 0xfd, 0xe2, 0xdf, 0xf9, 0x47, 0xfe, 0x8d, 0x55, 0xe3, 0x7f, + 0x17, 0xa0, 0xc0, 0xe8, 0x79, 0x6b, 0x6d, 0xdd, 0x4a, 0x29, 0x76, 0x75, 0x02, 0xbb, 0x75, 0x29, + 0x77, 0x60, 0x07, 0x2f, 0xf2, 0x20, 0xb1, 0xe5, 0x13, 0xef, 0xbe, 0xfa, 0xcf, 0xab, 0x7f, 0xf3, + 0x7b, 0xd0, 0xb3, 0x14, 0x18, 0xb2, 0xfe, 0x52, 0x52, 0x5c, 0xbe, 0x54, 0x45, 0x1a, 0x2a, 0xf1, + 0x54, 0x52, 0x3c, 0xd9, 0x85, 0xed, 0x34, 0xa0, 0x30, 0x1e, 0x7f, 0x63, 0xb0, 0xdf, 0x1e, 0x96, + 0x7f, 
0x66, 0x70, 0xe3, 0x18, 0x69, 0x4d, 0x7b, 0xfa, 0xbb, 0x7f, 0xbc, 0xfc, 0x6c, 0x06, 0xcf, + 0xfe, 0x6c, 0x6f, 0x93, 0xaf, 0x0c, 0x1e, 0x48, 0x7d, 0x6d, 0xc8, 0xc4, 0x69, 0x53, 0x66, 0xd5, + 0xcd, 0x98, 0x16, 0xe5, 0x53, 0xf6, 0xfe, 0x75, 0x24, 0x29, 0xce, 0xcf, 0x44, 0xa8, 0x53, 0xb7, + 0x00, 0x1e, 0xae, 0xce, 0xc2, 0x1a, 0xff, 0xb0, 0x3a, 0x12, 0x11, 0x2a, 0x37, 0xda, 0x7c, 0x93, + 0xce, 0x76, 0xca, 0x92, 0x87, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x51, 0x6b, 0xa4, 0x34, 0xc6, + 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricConfigClient is the client API for MetricConfig service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricConfigClient interface { + GetMetricConfig(ctx context.Context, in *MetricConfigRequest, opts ...grpc.CallOption) (*MetricConfigResponse, error) +} + +type metricConfigClient struct { + cc *grpc.ClientConn +} + +func NewMetricConfigClient(cc *grpc.ClientConn) MetricConfigClient { + return &metricConfigClient{cc} +} + +func (c *metricConfigClient) GetMetricConfig(ctx context.Context, in *MetricConfigRequest, opts ...grpc.CallOption) (*MetricConfigResponse, error) { + out := new(MetricConfigResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.metrics.experimental.MetricConfig/GetMetricConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricConfigServer is the server API for MetricConfig service. +type MetricConfigServer interface { + GetMetricConfig(context.Context, *MetricConfigRequest) (*MetricConfigResponse, error) +} + +// UnimplementedMetricConfigServer can be embedded to have forward compatible implementations. 
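+// Illustrative sketch, not part of the generated code: a hypothetical implementation
+// can embed UnimplementedMetricConfigServer so that methods added to the service later
+// do not break compilation, overriding only what it supports (noopConfigServer and
+// grpcServer are assumed names):
+//
+//	type noopConfigServer struct {
+//		UnimplementedMetricConfigServer
+//	}
+//
+//	func (s *noopConfigServer) GetMetricConfig(ctx context.Context, req *MetricConfigRequest) (*MetricConfigResponse, error) {
+//		return &MetricConfigResponse{}, nil
+//	}
+//
+//	// in the caller's setup code, with grpcServer an existing *grpc.Server:
+//	RegisterMetricConfigServer(grpcServer, &noopConfigServer{})
+//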
+type UnimplementedMetricConfigServer struct { +} + +func (*UnimplementedMetricConfigServer) GetMetricConfig(ctx context.Context, req *MetricConfigRequest) (*MetricConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMetricConfig not implemented") +} + +func RegisterMetricConfigServer(s *grpc.Server, srv MetricConfigServer) { + s.RegisterService(&_MetricConfig_serviceDesc, srv) +} + +func _MetricConfig_GetMetricConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricConfigServer).GetMetricConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.metrics.experimental.MetricConfig/GetMetricConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricConfigServer).GetMetricConfig(ctx, req.(*MetricConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricConfig_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.metrics.experimental.MetricConfig", + HandlerType: (*MetricConfigServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMetricConfig", + Handler: _MetricConfig_GetMetricConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/metrics/experimental/configservice.proto", +} diff --git a/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go b/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go new file mode 100644 index 0000000000..12df1441fb --- /dev/null +++ b/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go @@ -0,0 +1,1501 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/metrics/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values. It describes how those values relate to the time interval over +// which they are aggregated. +type AggregationTemporality int32 + +const ( + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. 
+ // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. + AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 + // CUMULATIVE is an AggregationTemporality for a metic aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_0+1 with a value of 1. + // + // Note: Even though, when reporting changes since last report time, using + // CUMULATIVE is valid, it is not recommended. This may cause problems for + // systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). 
+ AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 +) + +var AggregationTemporality_name = map[int32]string{ + 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", + 1: "AGGREGATION_TEMPORALITY_DELTA", + 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", +} + +var AggregationTemporality_value = map[string]int32{ + "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, + "AGGREGATION_TEMPORALITY_DELTA": 1, + "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, +} + +func (x AggregationTemporality) String() string { + return proto.EnumName(AggregationTemporality_name, int32(x)) +} + +func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{0} +} + +// A collection of InstrumentationLibraryMetrics from a Resource. +type ResourceMetrics struct { + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of metrics that originate from a resource. + InstrumentationLibraryMetrics []*InstrumentationLibraryMetrics `protobuf:"bytes,2,rep,name=instrumentation_library_metrics,json=instrumentationLibraryMetrics,proto3" json:"instrumentation_library_metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} } +func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) } +func (*ResourceMetrics) ProtoMessage() {} +func (*ResourceMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{0} +} +func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceMetrics.Unmarshal(m, b) +} +func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic) +} +func (m *ResourceMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetrics.Merge(m, src) +} +func (m *ResourceMetrics) XXX_Size() int { + return xxx_messageInfo_ResourceMetrics.Size(m) +} +func (m *ResourceMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo + +func (m *ResourceMetrics) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *ResourceMetrics) GetInstrumentationLibraryMetrics() []*InstrumentationLibraryMetrics { + if m != nil { + return m.InstrumentationLibraryMetrics + } + return nil +} + +// A collection of Metrics produced by an InstrumentationLibrary. +type InstrumentationLibraryMetrics struct { + // The instrumentation library information for the metrics in this message. + // If this field is not set then no library info is known. + InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` + // A list of metrics that originate from an instrumentation library. 
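+	// Illustrative sketch, not part of the generated code: a consumer holding a
+	// *ResourceMetrics value rm typically walks down to the individual metrics
+	// like this:
+	//
+	//	for _, ilm := range rm.GetInstrumentationLibraryMetrics() {
+	//		for _, metric := range ilm.GetMetrics() {
+	//			_ = metric.GetName()
+	//		}
+	//	}
+	//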
+ Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstrumentationLibraryMetrics) Reset() { *m = InstrumentationLibraryMetrics{} } +func (m *InstrumentationLibraryMetrics) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibraryMetrics) ProtoMessage() {} +func (*InstrumentationLibraryMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{1} +} +func (m *InstrumentationLibraryMetrics) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstrumentationLibraryMetrics.Unmarshal(m, b) +} +func (m *InstrumentationLibraryMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstrumentationLibraryMetrics.Marshal(b, m, deterministic) +} +func (m *InstrumentationLibraryMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibraryMetrics.Merge(m, src) +} +func (m *InstrumentationLibraryMetrics) XXX_Size() int { + return xxx_messageInfo_InstrumentationLibraryMetrics.Size(m) +} +func (m *InstrumentationLibraryMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibraryMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibraryMetrics proto.InternalMessageInfo + +func (m *InstrumentationLibraryMetrics) GetInstrumentationLibrary() *v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return nil +} + +func (m *InstrumentationLibraryMetrics) GetMetrics() []*Metric { + if m != nil { + return m.Metrics + } + return nil +} + +// Defines a Metric which has one or more timeseries. +// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of a metadata and data. +// - Metadata part contains a name, description, unit. +// - Data is one of the possible types (Gauge, Sum, Histogram, etc.). +// - DataPoint contains timestamps, labels, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +---------------------------+ +// |data |---> |Gauge, Sum, Histogram, ... | +// +------------+ +---------------------------+ +// +// Data [One of Gauge, Sum, Histogram, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// All DataPoint types have three common fields: +// - Labels zero or more key-value pairs associated with the data point. +// - StartTimeUnixNano MUST be set to the start of the interval when the data's +// type includes an AggregationTemporality. 
This field is not set otherwise. +// - TimeUnixNano MUST be set to: +// - the moment when an aggregation is reported (independent of the +// aggregation temporality). +// - the instantaneous time of the event. +type Metric struct { + // name of the metric, including its DNS name prefix. It must be unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + // Data determines the aggregation type (if any) of the metric, what is the + // reported value type for the data points, as well as the relatationship to + // the time interval over which they are reported. + // + // TODO: Update table after the decision on: + // https://github.com/open-telemetry/opentelemetry-specification/issues/731. + // By default, metrics recording using the OpenTelemetry API are exported as + // (the table does not include MeasurementValueType to avoid extra rows): + // + // Instrument Type + // ---------------------------------------------- + // Counter Sum(aggregation_temporality=delta;is_monotonic=true) + // UpDownCounter Sum(aggregation_temporality=delta;is_monotonic=false) + // ValueRecorder TBD + // SumObserver Sum(aggregation_temporality=cumulative;is_monotonic=true) + // UpDownSumObserver Sum(aggregation_temporality=cumulative;is_monotonic=false) + // ValueObserver Gauge() + // + // Types that are valid to be assigned to Data: + // *Metric_IntGauge + // *Metric_DoubleGauge + // *Metric_IntSum + // *Metric_DoubleSum + // *Metric_IntHistogram + // *Metric_DoubleHistogram + Data isMetric_Data `protobuf_oneof:"data"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{2} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +type isMetric_Data interface { + isMetric_Data() +} + +type Metric_IntGauge struct { + IntGauge *IntGauge `protobuf:"bytes,4,opt,name=int_gauge,json=intGauge,proto3,oneof" json:"int_gauge,omitempty"` +} +type Metric_DoubleGauge struct { + DoubleGauge *DoubleGauge `protobuf:"bytes,5,opt,name=double_gauge,json=doubleGauge,proto3,oneof" json:"double_gauge,omitempty"` +} +type Metric_IntSum struct { + IntSum *IntSum `protobuf:"bytes,6,opt,name=int_sum,json=intSum,proto3,oneof" json:"int_sum,omitempty"` +} +type Metric_DoubleSum struct { + DoubleSum *DoubleSum `protobuf:"bytes,7,opt,name=double_sum,json=doubleSum,proto3,oneof" json:"double_sum,omitempty"` +} +type Metric_IntHistogram struct { + 
IntHistogram *IntHistogram `protobuf:"bytes,8,opt,name=int_histogram,json=intHistogram,proto3,oneof" json:"int_histogram,omitempty"` +} +type Metric_DoubleHistogram struct { + DoubleHistogram *DoubleHistogram `protobuf:"bytes,9,opt,name=double_histogram,json=doubleHistogram,proto3,oneof" json:"double_histogram,omitempty"` +} + +func (*Metric_IntGauge) isMetric_Data() {} +func (*Metric_DoubleGauge) isMetric_Data() {} +func (*Metric_IntSum) isMetric_Data() {} +func (*Metric_DoubleSum) isMetric_Data() {} +func (*Metric_IntHistogram) isMetric_Data() {} +func (*Metric_DoubleHistogram) isMetric_Data() {} + +func (m *Metric) GetData() isMetric_Data { + if m != nil { + return m.Data + } + return nil +} + +func (m *Metric) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Metric) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Metric) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *Metric) GetIntGauge() *IntGauge { + if x, ok := m.GetData().(*Metric_IntGauge); ok { + return x.IntGauge + } + return nil +} + +func (m *Metric) GetDoubleGauge() *DoubleGauge { + if x, ok := m.GetData().(*Metric_DoubleGauge); ok { + return x.DoubleGauge + } + return nil +} + +func (m *Metric) GetIntSum() *IntSum { + if x, ok := m.GetData().(*Metric_IntSum); ok { + return x.IntSum + } + return nil +} + +func (m *Metric) GetDoubleSum() *DoubleSum { + if x, ok := m.GetData().(*Metric_DoubleSum); ok { + return x.DoubleSum + } + return nil +} + +func (m *Metric) GetIntHistogram() *IntHistogram { + if x, ok := m.GetData().(*Metric_IntHistogram); ok { + return x.IntHistogram + } + return nil +} + +func (m *Metric) GetDoubleHistogram() *DoubleHistogram { + if x, ok := m.GetData().(*Metric_DoubleHistogram); ok { + return x.DoubleHistogram + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Metric) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Metric_IntGauge)(nil), + (*Metric_DoubleGauge)(nil), + (*Metric_IntSum)(nil), + (*Metric_DoubleSum)(nil), + (*Metric_IntHistogram)(nil), + (*Metric_DoubleHistogram)(nil), + } +} + +// Gauge represents the type of a int scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. 
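+// Illustrative sketch, not part of the generated code: a consumer usually reaches an
+// IntGauge (or any of the other data shapes) through the Metric.Data oneof, e.g. for
+// a *Metric value named metric:
+//
+//	switch data := metric.Data.(type) {
+//	case *Metric_IntGauge:
+//		_ = data.IntGauge.GetDataPoints()
+//	case *Metric_DoubleSum:
+//		_ = data.DoubleSum.GetDataPoints()
+//	default:
+//		// remaining shapes: *Metric_DoubleGauge, *Metric_IntSum,
+//		// *Metric_IntHistogram, *Metric_DoubleHistogram
+//	}
+//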
+type IntGauge struct { + DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntGauge) Reset() { *m = IntGauge{} } +func (m *IntGauge) String() string { return proto.CompactTextString(m) } +func (*IntGauge) ProtoMessage() {} +func (*IntGauge) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{3} +} +func (m *IntGauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntGauge.Unmarshal(m, b) +} +func (m *IntGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntGauge.Marshal(b, m, deterministic) +} +func (m *IntGauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntGauge.Merge(m, src) +} +func (m *IntGauge) XXX_Size() int { + return xxx_messageInfo_IntGauge.Size(m) +} +func (m *IntGauge) XXX_DiscardUnknown() { + xxx_messageInfo_IntGauge.DiscardUnknown(m) +} + +var xxx_messageInfo_IntGauge proto.InternalMessageInfo + +func (m *IntGauge) GetDataPoints() []*IntDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +// Gauge represents the type of a double scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +type DoubleGauge struct { + DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleGauge) Reset() { *m = DoubleGauge{} } +func (m *DoubleGauge) String() string { return proto.CompactTextString(m) } +func (*DoubleGauge) ProtoMessage() {} +func (*DoubleGauge) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{4} +} +func (m *DoubleGauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleGauge.Unmarshal(m, b) +} +func (m *DoubleGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleGauge.Marshal(b, m, deterministic) +} +func (m *DoubleGauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleGauge.Merge(m, src) +} +func (m *DoubleGauge) XXX_Size() int { + return xxx_messageInfo_DoubleGauge.Size(m) +} +func (m *DoubleGauge) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleGauge.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleGauge proto.InternalMessageInfo + +func (m *DoubleGauge) GetDataPoints() []*DoubleDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +// Sum represents the type of a numeric int scalar metric that is calculated as +// a sum of all reported measurements over a time interval. +type IntSum struct { + DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + // If "true" means that the sum is monotonic. + IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntSum) Reset() { *m = IntSum{} } +func (m *IntSum) String() string { return proto.CompactTextString(m) } +func (*IntSum) ProtoMessage() {} +func (*IntSum) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{5} +} +func (m *IntSum) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntSum.Unmarshal(m, b) +} +func (m *IntSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntSum.Marshal(b, m, deterministic) +} +func (m *IntSum) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntSum.Merge(m, src) +} +func (m *IntSum) XXX_Size() int { + return xxx_messageInfo_IntSum.Size(m) +} +func (m *IntSum) XXX_DiscardUnknown() { + xxx_messageInfo_IntSum.DiscardUnknown(m) +} + +var xxx_messageInfo_IntSum proto.InternalMessageInfo + +func (m *IntSum) GetDataPoints() []*IntDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *IntSum) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +func (m *IntSum) GetIsMonotonic() bool { + if m != nil { + return m.IsMonotonic + } + return false +} + +// Sum represents the type of a numeric double scalar metric that is calculated +// as a sum of all reported measurements over a time interval. +type DoubleSum struct { + DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + // If "true" means that the sum is monotonic. 
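+	// Illustrative sketch, not part of the generated code: given a *DoubleSum value s,
+	// a consumer can recognize a conventional monotonically increasing counter like this:
+	//
+	//	if s.GetIsMonotonic() &&
+	//		s.GetAggregationTemporality() == AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
+	//		for _, dp := range s.GetDataPoints() {
+	//			_ = dp.GetValue() // running total since the fixed start time
+	//		}
+	//	}
+	//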
+ IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleSum) Reset() { *m = DoubleSum{} } +func (m *DoubleSum) String() string { return proto.CompactTextString(m) } +func (*DoubleSum) ProtoMessage() {} +func (*DoubleSum) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{6} +} +func (m *DoubleSum) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleSum.Unmarshal(m, b) +} +func (m *DoubleSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleSum.Marshal(b, m, deterministic) +} +func (m *DoubleSum) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleSum.Merge(m, src) +} +func (m *DoubleSum) XXX_Size() int { + return xxx_messageInfo_DoubleSum.Size(m) +} +func (m *DoubleSum) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleSum.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleSum proto.InternalMessageInfo + +func (m *DoubleSum) GetDataPoints() []*DoubleDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *DoubleSum) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +func (m *DoubleSum) GetIsMonotonic() bool { + if m != nil { + return m.IsMonotonic + } + return false +} + +// Represents the type of a metric that is calculated by aggregating as a +// Histogram of all reported int measurements over a time interval. +type IntHistogram struct { + DataPoints []*IntHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntHistogram) Reset() { *m = IntHistogram{} } +func (m *IntHistogram) String() string { return proto.CompactTextString(m) } +func (*IntHistogram) ProtoMessage() {} +func (*IntHistogram) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{7} +} +func (m *IntHistogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntHistogram.Unmarshal(m, b) +} +func (m *IntHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntHistogram.Marshal(b, m, deterministic) +} +func (m *IntHistogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntHistogram.Merge(m, src) +} +func (m *IntHistogram) XXX_Size() int { + return xxx_messageInfo_IntHistogram.Size(m) +} +func (m *IntHistogram) XXX_DiscardUnknown() { + xxx_messageInfo_IntHistogram.DiscardUnknown(m) +} + +var xxx_messageInfo_IntHistogram proto.InternalMessageInfo + +func (m *IntHistogram) GetDataPoints() []*IntHistogramDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *IntHistogram) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// Represents the type of a metric that is calculated by aggregating as a +// Histogram of all reported double measurements over a time interval. +type DoubleHistogram struct { + DataPoints []*DoubleHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleHistogram) Reset() { *m = DoubleHistogram{} } +func (m *DoubleHistogram) String() string { return proto.CompactTextString(m) } +func (*DoubleHistogram) ProtoMessage() {} +func (*DoubleHistogram) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{8} +} +func (m *DoubleHistogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleHistogram.Unmarshal(m, b) +} +func (m *DoubleHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleHistogram.Marshal(b, m, deterministic) +} +func (m *DoubleHistogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleHistogram.Merge(m, src) +} +func (m *DoubleHistogram) XXX_Size() int { + return xxx_messageInfo_DoubleHistogram.Size(m) +} +func (m *DoubleHistogram) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleHistogram.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleHistogram proto.InternalMessageInfo + +func (m *DoubleHistogram) GetDataPoints() []*DoubleHistogramDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *DoubleHistogram) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// IntDataPoint is a single data point in a timeseries that describes the +// time-varying values of a int64 metric. +type IntDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // value itself. 
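+	// Illustrative sketch, not part of the generated code: a producer reporting a
+	// cumulative int64 measurement might fill a data point like this (startNano and
+	// nowNano are assumed caller-supplied timestamps):
+	//
+	//	dp := &IntDataPoint{
+	//		StartTimeUnixNano: startNano,
+	//		TimeUnixNano:      nowNano,
+	//		Value:             42,
+	//	}
+	//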
+ Value int64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*IntExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntDataPoint) Reset() { *m = IntDataPoint{} } +func (m *IntDataPoint) String() string { return proto.CompactTextString(m) } +func (*IntDataPoint) ProtoMessage() {} +func (*IntDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{9} +} +func (m *IntDataPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntDataPoint.Unmarshal(m, b) +} +func (m *IntDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntDataPoint.Marshal(b, m, deterministic) +} +func (m *IntDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntDataPoint.Merge(m, src) +} +func (m *IntDataPoint) XXX_Size() int { + return xxx_messageInfo_IntDataPoint.Size(m) +} +func (m *IntDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_IntDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_IntDataPoint proto.InternalMessageInfo + +func (m *IntDataPoint) GetLabels() []*v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *IntDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *IntDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntDataPoint) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *IntDataPoint) GetExemplars() []*IntExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// DoubleDataPoint is a single data point in a timeseries that describes the +// time-varying value of a double metric. +type DoubleDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // value itself. 
+ Value float64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*DoubleExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleDataPoint) Reset() { *m = DoubleDataPoint{} } +func (m *DoubleDataPoint) String() string { return proto.CompactTextString(m) } +func (*DoubleDataPoint) ProtoMessage() {} +func (*DoubleDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{10} +} +func (m *DoubleDataPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleDataPoint.Unmarshal(m, b) +} +func (m *DoubleDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleDataPoint.Marshal(b, m, deterministic) +} +func (m *DoubleDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleDataPoint.Merge(m, src) +} +func (m *DoubleDataPoint) XXX_Size() int { + return xxx_messageInfo_DoubleDataPoint.Size(m) +} +func (m *DoubleDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleDataPoint proto.InternalMessageInfo + +func (m *DoubleDataPoint) GetLabels() []*v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *DoubleDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *DoubleDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleDataPoint) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DoubleDataPoint) GetExemplars() []*DoubleExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// IntHistogramDataPoint is a single data point in a timeseries that describes +// the time-varying values of a Histogram of int values. A Histogram contains +// summary statistics for a population of values, it may optionally contain +// the distribution of those values across a set of buckets. +type IntHistogramDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. 
This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. This value must be equal to the sum of the "sum" fields in + // buckets if a histogram is provided. + Sum int64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` + // bucket_counts is an optional field contains the count values of histogram + // for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be by one greater than + // the number of elements in explicit_bounds array. + BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // The bucket boundaries are described by "bounds" field. + // + // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket + // at index i are: + // + // (-infinity, bounds[i]) for i == 0 + // [bounds[i-1], bounds[i]) for 0 < i < N-1 + // [bounds[i], +infinity) for i == N-1 + // The values in bounds array must be strictly increasing. + // + // Note: only [a, b) intervals are currently supported for each bucket except the first one. + // If we decide to also support (a, b] intervals we should add support for these by defining + // a boolean value which decides what type of intervals to use. + ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*IntExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntHistogramDataPoint) Reset() { *m = IntHistogramDataPoint{} } +func (m *IntHistogramDataPoint) String() string { return proto.CompactTextString(m) } +func (*IntHistogramDataPoint) ProtoMessage() {} +func (*IntHistogramDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{11} +} +func (m *IntHistogramDataPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntHistogramDataPoint.Unmarshal(m, b) +} +func (m *IntHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntHistogramDataPoint.Marshal(b, m, deterministic) +} +func (m *IntHistogramDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntHistogramDataPoint.Merge(m, src) +} +func (m *IntHistogramDataPoint) XXX_Size() int { + return xxx_messageInfo_IntHistogramDataPoint.Size(m) +} +func (m *IntHistogramDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_IntHistogramDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_IntHistogramDataPoint proto.InternalMessageInfo + +func (m *IntHistogramDataPoint) GetLabels() []*v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *IntHistogramDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *IntHistogramDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntHistogramDataPoint) GetCount() uint64 { + 
if m != nil { + return m.Count + } + return 0 +} + +func (m *IntHistogramDataPoint) GetSum() int64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *IntHistogramDataPoint) GetBucketCounts() []uint64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *IntHistogramDataPoint) GetExplicitBounds() []float64 { + if m != nil { + return m.ExplicitBounds + } + return nil +} + +func (m *IntHistogramDataPoint) GetExemplars() []*IntExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram of double values. A Histogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +type DoubleHistogramDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. This value must be equal to the sum of the "sum" fields in + // buckets if a histogram is provided. + Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` + // bucket_counts is an optional field contains the count values of histogram + // for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be by one greater than + // the number of elements in explicit_bounds array. + BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // The bucket boundaries are described by "bounds" field. + // + // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket + // at index i are: + // + // (-infinity, bounds[i]) for i == 0 + // [bounds[i-1], bounds[i]) for 0 < i < N-1 + // [bounds[i], +infinity) for i == N-1 + // The values in bounds array must be strictly increasing. + // + // Note: only [a, b) intervals are currently supported for each bucket except the first one. 
+ // If we decide to also support (a, b] intervals we should add support for these by defining + // a boolean value which decides what type of intervals to use. + ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*DoubleExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleHistogramDataPoint) Reset() { *m = DoubleHistogramDataPoint{} } +func (m *DoubleHistogramDataPoint) String() string { return proto.CompactTextString(m) } +func (*DoubleHistogramDataPoint) ProtoMessage() {} +func (*DoubleHistogramDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{12} +} +func (m *DoubleHistogramDataPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleHistogramDataPoint.Unmarshal(m, b) +} +func (m *DoubleHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleHistogramDataPoint.Marshal(b, m, deterministic) +} +func (m *DoubleHistogramDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleHistogramDataPoint.Merge(m, src) +} +func (m *DoubleHistogramDataPoint) XXX_Size() int { + return xxx_messageInfo_DoubleHistogramDataPoint.Size(m) +} +func (m *DoubleHistogramDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleHistogramDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleHistogramDataPoint proto.InternalMessageInfo + +func (m *DoubleHistogramDataPoint) GetLabels() []*v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetCount() uint64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetBucketCounts() []uint64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetExplicitBounds() []float64 { + if m != nil { + return m.ExplicitBounds + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetExemplars() []*DoubleExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// A representation of an exemplar, which is a sample input int measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type IntExemplar struct { + // The set of labels that were filtered out by the aggregator, but recorded + // alongside the original measurement. Only labels that were filtered out + // by the aggregator should be included + FilteredLabels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels,omitempty"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. 
+ TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical int value of the measurement that was recorded. + Value int64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntExemplar) Reset() { *m = IntExemplar{} } +func (m *IntExemplar) String() string { return proto.CompactTextString(m) } +func (*IntExemplar) ProtoMessage() {} +func (*IntExemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{13} +} +func (m *IntExemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntExemplar.Unmarshal(m, b) +} +func (m *IntExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntExemplar.Marshal(b, m, deterministic) +} +func (m *IntExemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntExemplar.Merge(m, src) +} +func (m *IntExemplar) XXX_Size() int { + return xxx_messageInfo_IntExemplar.Size(m) +} +func (m *IntExemplar) XXX_DiscardUnknown() { + xxx_messageInfo_IntExemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_IntExemplar proto.InternalMessageInfo + +func (m *IntExemplar) GetFilteredLabels() []*v11.StringKeyValue { + if m != nil { + return m.FilteredLabels + } + return nil +} + +func (m *IntExemplar) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntExemplar) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *IntExemplar) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *IntExemplar) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +// A representation of an exemplar, which is a sample input double measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type DoubleExemplar struct { + // The set of labels that were filtered out by the aggregator, but recorded + // alongside the original measurement. Only labels that were filtered out + // by the aggregator should be included + FilteredLabels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels,omitempty"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical double value of the measurement that was recorded. + Value float64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) Span ID of the exemplar trace. 
+ // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleExemplar) Reset() { *m = DoubleExemplar{} } +func (m *DoubleExemplar) String() string { return proto.CompactTextString(m) } +func (*DoubleExemplar) ProtoMessage() {} +func (*DoubleExemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{14} +} +func (m *DoubleExemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleExemplar.Unmarshal(m, b) +} +func (m *DoubleExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleExemplar.Marshal(b, m, deterministic) +} +func (m *DoubleExemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleExemplar.Merge(m, src) +} +func (m *DoubleExemplar) XXX_Size() int { + return xxx_messageInfo_DoubleExemplar.Size(m) +} +func (m *DoubleExemplar) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleExemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleExemplar proto.InternalMessageInfo + +func (m *DoubleExemplar) GetFilteredLabels() []*v11.StringKeyValue { + if m != nil { + return m.FilteredLabels + } + return nil +} + +func (m *DoubleExemplar) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleExemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DoubleExemplar) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *DoubleExemplar) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) + proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics") + proto.RegisterType((*InstrumentationLibraryMetrics)(nil), "opentelemetry.proto.metrics.v1.InstrumentationLibraryMetrics") + proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric") + proto.RegisterType((*IntGauge)(nil), "opentelemetry.proto.metrics.v1.IntGauge") + proto.RegisterType((*DoubleGauge)(nil), "opentelemetry.proto.metrics.v1.DoubleGauge") + proto.RegisterType((*IntSum)(nil), "opentelemetry.proto.metrics.v1.IntSum") + proto.RegisterType((*DoubleSum)(nil), "opentelemetry.proto.metrics.v1.DoubleSum") + proto.RegisterType((*IntHistogram)(nil), "opentelemetry.proto.metrics.v1.IntHistogram") + proto.RegisterType((*DoubleHistogram)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogram") + proto.RegisterType((*IntDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntDataPoint") + proto.RegisterType((*DoubleDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleDataPoint") + proto.RegisterType((*IntHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntHistogramDataPoint") + proto.RegisterType((*DoubleHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogramDataPoint") + proto.RegisterType((*IntExemplar)(nil), "opentelemetry.proto.metrics.v1.IntExemplar") + 
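+ // Informative sketch of the bucket layout documented on
+ // DoubleHistogramDataPoint above, with illustrative values: three explicit
+ // bounds define four buckets, so BucketCounts needs four entries and Count
+ // must equal their sum:
+ //
+ //	dp := &DoubleHistogramDataPoint{
+ //		TimeUnixNano:   1603200000000000000,
+ //		ExplicitBounds: []float64{0, 10, 100}, // (-inf,0) [0,10) [10,100) [100,+inf)
+ //		BucketCounts:   []uint64{1, 5, 3, 0},
+ //		Count:          9,                     // 1 + 5 + 3 + 0
+ //		Sum:            123.4,
+ //	}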
proto.RegisterType((*DoubleExemplar)(nil), "opentelemetry.proto.metrics.v1.DoubleExemplar") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917) +} + +var fileDescriptor_3c3112f9fa006917 = []byte{ + // 1059 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x41, 0x4f, 0xe3, 0x46, + 0x14, 0xc6, 0x09, 0x38, 0xc9, 0x4b, 0x16, 0xd2, 0xd1, 0x16, 0xdc, 0x95, 0x68, 0x21, 0x5b, 0xb1, + 0x74, 0x77, 0x49, 0x04, 0xd5, 0x56, 0xbd, 0x54, 0x6d, 0x80, 0x14, 0xd2, 0x06, 0x36, 0x1a, 0x02, + 0x12, 0x55, 0x25, 0x6b, 0x12, 0x4f, 0xb3, 0xa3, 0xda, 0x33, 0x91, 0x3d, 0x46, 0xf0, 0x03, 0x7a, + 0x6b, 0x4f, 0xfd, 0x31, 0xfd, 0x1d, 0x3d, 0x54, 0xed, 0xa5, 0x52, 0xef, 0x3d, 0xf5, 0xd2, 0x53, + 0x0f, 0xd5, 0x8c, 0x6d, 0x92, 0x2c, 0x86, 0x64, 0xc5, 0xae, 0xc4, 0xde, 0xde, 0xbc, 0x79, 0xef, + 0xf3, 0xf7, 0xbe, 0xf7, 0x5e, 0x0c, 0x86, 0xa7, 0x62, 0x40, 0xb9, 0xa4, 0x2e, 0xf5, 0xa8, 0xf4, + 0x2f, 0x6a, 0x03, 0x5f, 0x48, 0x51, 0x53, 0x36, 0xeb, 0x05, 0xb5, 0xb3, 0xcd, 0xc4, 0xac, 0xea, + 0x0b, 0xf4, 0xfe, 0x58, 0x74, 0xe4, 0xac, 0x26, 0x21, 0x67, 0x9b, 0x0f, 0x1e, 0xa7, 0xa1, 0xf5, + 0x84, 0xe7, 0x09, 0xae, 0xc0, 0x22, 0x2b, 0x4a, 0x7b, 0x50, 0x4d, 0x8b, 0xf5, 0x69, 0x20, 0x42, + 0xbf, 0x47, 0x55, 0x74, 0x62, 0x47, 0xf1, 0x95, 0xbf, 0x0c, 0x58, 0xc0, 0xb1, 0xeb, 0x20, 0x7a, + 0x24, 0x6a, 0x40, 0x3e, 0x89, 0xb2, 0x8c, 0x15, 0x63, 0xbd, 0xb8, 0xf5, 0x51, 0x35, 0x8d, 0xe2, + 0x25, 0xd4, 0xd9, 0x66, 0x35, 0xc1, 0xc0, 0x97, 0xa9, 0xe8, 0x07, 0x03, 0x3e, 0x60, 0x3c, 0x90, + 0x7e, 0xe8, 0x51, 0x2e, 0x89, 0x64, 0x82, 0xdb, 0x2e, 0xeb, 0xfa, 0xc4, 0xbf, 0xb0, 0xe3, 0xea, + 0xac, 0xcc, 0x4a, 0x76, 0xbd, 0xb8, 0xf5, 0x59, 0xf5, 0x66, 0x05, 0xaa, 0xcd, 0x71, 0x98, 0x56, + 0x84, 0x12, 0xf3, 0xc5, 0xcb, 0xec, 0xa6, 0xeb, 0xca, 0xaf, 0x06, 0x2c, 0xdf, 0x08, 0x80, 0x38, + 0x2c, 0x5d, 0x43, 0x34, 0xae, 0xff, 0x59, 0x2a, 0xc1, 0x58, 0xf8, 0x6b, 0xf9, 0xe1, 0xc5, 0x74, + 0x62, 0xe8, 0x0b, 0xc8, 0x8d, 0x0b, 0xb0, 0x36, 0x49, 0x80, 0x88, 0x29, 0x4e, 0xd2, 0x2a, 0xbf, + 0xcc, 0x82, 0x19, 0xf9, 0x10, 0x82, 0x59, 0x4e, 0xbc, 0xa8, 0x53, 0x05, 0xac, 0x6d, 0xb4, 0x02, + 0x45, 0x87, 0x06, 0x3d, 0x9f, 0x0d, 0xd4, 0x63, 0xad, 0x8c, 0xbe, 0x1a, 0x75, 0xa9, 0xac, 0x90, + 0x33, 0x69, 0x65, 0xa3, 0x2c, 0x65, 0xa3, 0x3d, 0x28, 0x30, 0x2e, 0xed, 0x3e, 0x09, 0xfb, 0xd4, + 0x9a, 0xd5, 0x85, 0xaf, 0x4f, 0xee, 0x8c, 0xdc, 0x53, 0xf1, 0xfb, 0x33, 0x38, 0xcf, 0x62, 0x1b, + 0xb5, 0xa1, 0xe4, 0x88, 0xb0, 0xeb, 0xd2, 0x18, 0x6b, 0x4e, 0x63, 0x3d, 0x99, 0x84, 0xb5, 0xab, + 0x73, 0x12, 0xb8, 0xa2, 0x33, 0x3c, 0xa2, 0x3a, 0xe4, 0x14, 0xb5, 0x20, 0xf4, 0x2c, 0x53, 0x83, + 0xad, 0x4d, 0x41, 0xec, 0x28, 0xf4, 0xf6, 0x67, 0xb0, 0xc9, 0xb4, 0x85, 0xbe, 0x02, 0x88, 0x49, + 0x29, 0x94, 0xdc, 0x0d, 0x73, 0x7d, 0x85, 0x52, 0x04, 0x54, 0x70, 0x92, 0x03, 0x3a, 0x82, 0x7b, + 0x8a, 0xce, 0x0b, 0x16, 0x48, 0xd1, 0xf7, 0x89, 0x67, 0xe5, 0x35, 0xdc, 0xd3, 0x29, 0x48, 0xed, + 0x27, 0x39, 0xfb, 0x33, 0xb8, 0xc4, 0x46, 0xce, 0xe8, 0x5b, 0x28, 0xc7, 0x04, 0x87, 0xb8, 0x05, + 0x8d, 0x5b, 0x9b, 0x8e, 0xe6, 0x28, 0xf4, 0x82, 0x33, 0xee, 0xda, 0x36, 0x61, 0xd6, 0x21, 0x92, + 0x54, 0x4e, 0x21, 0x9f, 0xf4, 0x0c, 0x1d, 0x40, 0x51, 0xf9, 0xec, 0x81, 0x60, 0x5c, 0x06, 0x96, + 0xa1, 0x67, 0x71, 0x9a, 0x22, 0x76, 0x89, 0x24, 0x6d, 0x95, 0x84, 0xc1, 0x49, 0xcc, 0xa0, 0x62, + 0x43, 0x71, 0xa4, 0x85, 0xa8, 0x9d, 0x86, 0x3e, 0x65, 0x29, 0xe9, 0x0f, 0xf8, 0xdb, 0x00, 0x33, + 0xea, 0xeb, 0x6b, 0xa6, 0x8e, 0x04, 0x2c, 0x91, 0x7e, 0xdf, 0xa7, 0xfd, 0x68, 0xfb, 0x25, 0xf5, + 
0x06, 0xc2, 0x27, 0x2e, 0x93, 0x17, 0x7a, 0x79, 0xe6, 0xb7, 0x3e, 0x99, 0x04, 0x5d, 0x1f, 0xa6, + 0x77, 0x86, 0xd9, 0x78, 0x91, 0xa4, 0xfa, 0xd1, 0x2a, 0x94, 0x58, 0x60, 0x7b, 0x82, 0x0b, 0x29, + 0x38, 0xeb, 0xe9, 0x3d, 0xcc, 0xe3, 0x22, 0x0b, 0x0e, 0x12, 0x57, 0xe5, 0x1f, 0x03, 0x0a, 0x97, + 0xf3, 0xf7, 0xfa, 0xd5, 0xbc, 0x93, 0x35, 0xff, 0x6e, 0x40, 0x69, 0x74, 0x49, 0xd0, 0x49, 0x5a, + 0xd9, 0xcf, 0x5e, 0x65, 0xcf, 0xee, 0x46, 0xf1, 0x95, 0x3f, 0x0d, 0x58, 0x78, 0x69, 0x4d, 0xd1, + 0x69, 0x5a, 0x71, 0x9f, 0xbe, 0xe2, 0xb2, 0xdf, 0x91, 0xfa, 0x7e, 0xca, 0xe8, 0xce, 0x5d, 0xb2, + 0x41, 0x0d, 0x30, 0x5d, 0xd2, 0xa5, 0x6e, 0x52, 0xd7, 0xc6, 0x84, 0x77, 0xe8, 0x91, 0xf4, 0x19, + 0xef, 0x7f, 0x4d, 0x2f, 0x4e, 0x88, 0x1b, 0x52, 0x1c, 0x27, 0xa3, 0x1a, 0xdc, 0x0f, 0x24, 0xf1, + 0xa5, 0x2d, 0x99, 0x47, 0xed, 0x90, 0xb3, 0x73, 0x9b, 0x13, 0x2e, 0x74, 0x15, 0x26, 0x7e, 0x47, + 0xdf, 0x75, 0x98, 0x47, 0x8f, 0x39, 0x3b, 0x3f, 0x24, 0x5c, 0xa0, 0x0f, 0x61, 0xfe, 0xa5, 0xd0, + 0xac, 0x0e, 0x2d, 0xc9, 0xd1, 0xa8, 0xfb, 0x30, 0x77, 0xa6, 0x9e, 0xa3, 0xdf, 0x73, 0x65, 0x1c, + 0x1d, 0x50, 0x13, 0x0a, 0xf4, 0x9c, 0x7a, 0x03, 0x97, 0xf8, 0x81, 0x35, 0xa7, 0x69, 0x3f, 0x99, + 0x62, 0xd6, 0x1a, 0x71, 0x0e, 0x1e, 0x66, 0x57, 0x7e, 0xce, 0x24, 0xfd, 0x7e, 0x2b, 0x25, 0x31, + 0x12, 0x49, 0x5a, 0x57, 0x25, 0xa9, 0x4e, 0x37, 0xa1, 0x69, 0xaa, 0xfc, 0x9b, 0x81, 0x77, 0x53, + 0x97, 0xf3, 0xee, 0x6b, 0xd3, 0x13, 0x21, 0x97, 0x5a, 0x1b, 0x13, 0x47, 0x07, 0x54, 0x86, 0xac, + 0xfa, 0x5b, 0x62, 0x4e, 0x8f, 0x90, 0x32, 0xd1, 0x43, 0xb8, 0xd7, 0x0d, 0x7b, 0xdf, 0x53, 0x69, + 0xeb, 0x88, 0xc0, 0x32, 0x57, 0xb2, 0x0a, 0x2c, 0x72, 0xee, 0x68, 0x1f, 0x7a, 0x04, 0x0b, 0xf4, + 0x7c, 0xe0, 0xb2, 0x1e, 0x93, 0x76, 0x57, 0x84, 0xdc, 0x09, 0xac, 0xdc, 0x4a, 0x76, 0xdd, 0xc0, + 0xf3, 0x89, 0x7b, 0x5b, 0x7b, 0xc7, 0xc7, 0x31, 0x7f, 0xab, 0x71, 0xfc, 0x2f, 0x03, 0xd6, 0x75, + 0x3f, 0x1c, 0x6f, 0xbb, 0xf6, 0xc6, 0x9b, 0xd0, 0xbe, 0x75, 0x55, 0xfb, 0x5b, 0xcc, 0xfd, 0x6f, + 0x06, 0x14, 0x47, 0x3a, 0x83, 0x4e, 0x60, 0xe1, 0x3b, 0xe6, 0x4a, 0xea, 0x53, 0xc7, 0xbe, 0x8d, + 0xf4, 0xf3, 0x09, 0x4a, 0x2b, 0x6a, 0xc1, 0x55, 0x45, 0x33, 0x37, 0x6d, 0x7a, 0x76, 0xf4, 0xc7, + 0x6f, 0x09, 0x72, 0xc1, 0x80, 0x70, 0x9b, 0x39, 0x5a, 0xe9, 0x12, 0x36, 0xd5, 0xb1, 0xe9, 0xa0, + 0xf7, 0x20, 0x2f, 0x7d, 0xd2, 0xa3, 0xea, 0x66, 0x4e, 0xdf, 0xe4, 0xf4, 0xb9, 0xe9, 0x54, 0xfe, + 0x30, 0x60, 0x7e, 0xbc, 0xea, 0xbb, 0x54, 0x9a, 0x71, 0x8b, 0xd2, 0x1e, 0xff, 0x68, 0xc0, 0x62, + 0xfa, 0x3b, 0x10, 0x3d, 0x82, 0x87, 0xf5, 0xbd, 0x3d, 0xdc, 0xd8, 0xab, 0x77, 0x9a, 0xcf, 0x0f, + 0xed, 0x4e, 0xe3, 0xa0, 0xfd, 0x1c, 0xd7, 0x5b, 0xcd, 0xce, 0xa9, 0x7d, 0x7c, 0x78, 0xd4, 0x6e, + 0xec, 0x34, 0xbf, 0x6c, 0x36, 0x76, 0xcb, 0x33, 0x68, 0x15, 0x96, 0xaf, 0x0b, 0xdc, 0x6d, 0xb4, + 0x3a, 0xf5, 0xb2, 0x81, 0xd6, 0xa0, 0x72, 0x5d, 0xc8, 0xce, 0xf1, 0xc1, 0x71, 0xab, 0xde, 0x69, + 0x9e, 0x34, 0xca, 0x99, 0x6d, 0x09, 0xab, 0x4c, 0x4c, 0x18, 0xc0, 0xed, 0x52, 0xfc, 0x1f, 0x6d, + 0x5b, 0x5d, 0xb4, 0x8d, 0x6f, 0x3e, 0xef, 0x33, 0xf9, 0x22, 0xec, 0x2a, 0x91, 0x6b, 0x2a, 0x75, + 0x63, 0xf8, 0x65, 0x60, 0x0c, 0x69, 0x23, 0xfa, 0x4e, 0xd0, 0xa7, 0xbc, 0xd6, 0x1f, 0xfd, 0x50, + 0xd1, 0x35, 0xf5, 0xc5, 0xc7, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x7c, 0x5f, 0x8f, 0xd1, + 0x10, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go b/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go new file mode 100644 index 0000000000..75fedfe4e5 --- /dev/null +++ b/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go @@ -0,0 +1,100 @@ +// Code 
generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Set of labels that describe the resource. + Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_446f73eacf88f3f5, []int{0} +} +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetAttributes() []*v1.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Resource) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func init() { + proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5) +} + +var fileDescriptor_446f73eacf88f3f5 = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcb, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, + 0x42, 0xf2, 0x28, 0xea, 0x21, 0x82, 0x7a, 0x70, 0x35, 0x65, 0x86, 0x52, 0x5a, 0xd8, 0x0c, 0x4c, + 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0x03, 0x19, 0x07, 0x61, 0x41, 0xf4, 0x29, 0xf5, 0x32, 0x72, 0x71, + 0x04, 0x41, 0xf5, 0x0a, 0xb9, 0x73, 0x71, 0x25, 0x96, 0x94, 0x14, 0x65, 0x26, 0x95, 0x96, 0xa4, + 0x16, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0x61, 0xb3, 0x0e, 0x6a, 0x46, 0x99, + 0xa1, 0x9e, 0x77, 0x6a, 0x65, 0x58, 0x62, 0x4e, 0x69, 0x6a, 0x10, 0x92, 0x56, 0x21, 0x0b, 0x2e, + 0x89, 
0x94, 0xa2, 0xfc, 0x82, 0x82, 0xd4, 0x94, 0x78, 0x84, 0x68, 0x7c, 0x72, 0x7e, 0x69, 0x5e, + 0x89, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6f, 0x90, 0x18, 0x54, 0xde, 0x11, 0x2e, 0xed, 0x0c, 0x92, + 0x75, 0x2a, 0xe7, 0x52, 0xca, 0xcc, 0xd7, 0x23, 0xe0, 0x43, 0x27, 0x5e, 0x98, 0x93, 0x03, 0x40, + 0x52, 0x01, 0x8c, 0x51, 0x0e, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x20, 0x77, 0xe9, 0x83, 0x34, + 0xeb, 0x22, 0xbc, 0x8f, 0x62, 0x96, 0x2e, 0x24, 0x30, 0xd2, 0x53, 0xf3, 0xf4, 0xd3, 0x51, 0x02, + 0x39, 0x89, 0x0d, 0x2c, 0x63, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xba, 0x7f, 0x2f, 0x93, 0x8e, + 0x01, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go b/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go new file mode 100644 index 0000000000..8e63f5749f --- /dev/null +++ b/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go @@ -0,0 +1,815 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/trace/v1/trace.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span represents an internal operation within an application, + // as opposed to an operations happening at the boundaries. Default value. + Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SPAN_KIND_SERVER Span_SpanKind = 2 + // Indicates that the span describes a request to some remote service. + Span_SPAN_KIND_CLIENT Span_SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. 
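+ // For example, a span covering a queue worker that processes a message
+ // published by an earlier PRODUCER span would use this kind.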
+ Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SPAN_KIND_INTERNAL", + 2: "SPAN_KIND_SERVER", + 3: "SPAN_KIND_CLIENT", + 4: "SPAN_KIND_PRODUCER", + 5: "SPAN_KIND_CONSUMER", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SPAN_KIND_INTERNAL": 1, + "SPAN_KIND_SERVER": 2, + "SPAN_KIND_CLIENT": 3, + "SPAN_KIND_PRODUCER": 4, + "SPAN_KIND_CONSUMER": 5, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 0} +} + +type Status_DeprecatedStatusCode int32 + +const ( + Status_DEPRECATED_STATUS_CODE_OK Status_DeprecatedStatusCode = 0 + Status_DEPRECATED_STATUS_CODE_CANCELLED Status_DeprecatedStatusCode = 1 + Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR Status_DeprecatedStatusCode = 2 + Status_DEPRECATED_STATUS_CODE_INVALID_ARGUMENT Status_DeprecatedStatusCode = 3 + Status_DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED Status_DeprecatedStatusCode = 4 + Status_DEPRECATED_STATUS_CODE_NOT_FOUND Status_DeprecatedStatusCode = 5 + Status_DEPRECATED_STATUS_CODE_ALREADY_EXISTS Status_DeprecatedStatusCode = 6 + Status_DEPRECATED_STATUS_CODE_PERMISSION_DENIED Status_DeprecatedStatusCode = 7 + Status_DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED Status_DeprecatedStatusCode = 8 + Status_DEPRECATED_STATUS_CODE_FAILED_PRECONDITION Status_DeprecatedStatusCode = 9 + Status_DEPRECATED_STATUS_CODE_ABORTED Status_DeprecatedStatusCode = 10 + Status_DEPRECATED_STATUS_CODE_OUT_OF_RANGE Status_DeprecatedStatusCode = 11 + Status_DEPRECATED_STATUS_CODE_UNIMPLEMENTED Status_DeprecatedStatusCode = 12 + Status_DEPRECATED_STATUS_CODE_INTERNAL_ERROR Status_DeprecatedStatusCode = 13 + Status_DEPRECATED_STATUS_CODE_UNAVAILABLE Status_DeprecatedStatusCode = 14 + Status_DEPRECATED_STATUS_CODE_DATA_LOSS Status_DeprecatedStatusCode = 15 + Status_DEPRECATED_STATUS_CODE_UNAUTHENTICATED Status_DeprecatedStatusCode = 16 +) + +var Status_DeprecatedStatusCode_name = map[int32]string{ + 0: "DEPRECATED_STATUS_CODE_OK", + 1: "DEPRECATED_STATUS_CODE_CANCELLED", + 2: "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR", + 3: "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT", + 4: "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED", + 5: "DEPRECATED_STATUS_CODE_NOT_FOUND", + 6: "DEPRECATED_STATUS_CODE_ALREADY_EXISTS", + 7: "DEPRECATED_STATUS_CODE_PERMISSION_DENIED", + 8: "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED", + 9: "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION", + 10: "DEPRECATED_STATUS_CODE_ABORTED", + 11: "DEPRECATED_STATUS_CODE_OUT_OF_RANGE", + 12: "DEPRECATED_STATUS_CODE_UNIMPLEMENTED", + 13: "DEPRECATED_STATUS_CODE_INTERNAL_ERROR", + 14: "DEPRECATED_STATUS_CODE_UNAVAILABLE", + 15: "DEPRECATED_STATUS_CODE_DATA_LOSS", + 16: "DEPRECATED_STATUS_CODE_UNAUTHENTICATED", +} + +var Status_DeprecatedStatusCode_value = map[string]int32{ + "DEPRECATED_STATUS_CODE_OK": 0, + "DEPRECATED_STATUS_CODE_CANCELLED": 1, + "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR": 2, + "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT": 3, + "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED": 4, + "DEPRECATED_STATUS_CODE_NOT_FOUND": 5, + "DEPRECATED_STATUS_CODE_ALREADY_EXISTS": 6, + "DEPRECATED_STATUS_CODE_PERMISSION_DENIED": 7, + "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED": 8, + "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION": 9, + "DEPRECATED_STATUS_CODE_ABORTED": 10, + "DEPRECATED_STATUS_CODE_OUT_OF_RANGE": 11, + "DEPRECATED_STATUS_CODE_UNIMPLEMENTED": 12, + 
"DEPRECATED_STATUS_CODE_INTERNAL_ERROR": 13, + "DEPRECATED_STATUS_CODE_UNAVAILABLE": 14, + "DEPRECATED_STATUS_CODE_DATA_LOSS": 15, + "DEPRECATED_STATUS_CODE_UNAUTHENTICATED": 16, +} + +func (x Status_DeprecatedStatusCode) String() string { + return proto.EnumName(Status_DeprecatedStatusCode_name, int32(x)) +} + +func (Status_DeprecatedStatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 0} +} + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status +type Status_StatusCode int32 + +const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 + // The Span has been validated by an Application developers or Operator to have + // completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +) + +var Status_StatusCode_name = map[int32]string{ + 0: "STATUS_CODE_UNSET", + 1: "STATUS_CODE_OK", + 2: "STATUS_CODE_ERROR", +} + +var Status_StatusCode_value = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, +} + +func (x Status_StatusCode) String() string { + return proto.EnumName(Status_StatusCode_name, int32(x)) +} + +func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 1} +} + +// A collection of InstrumentationLibrarySpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of InstrumentationLibrarySpans that originate from a resource. + InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceSpans) Reset() { *m = ResourceSpans{} } +func (m *ResourceSpans) String() string { return proto.CompactTextString(m) } +func (*ResourceSpans) ProtoMessage() {} +func (*ResourceSpans) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{0} +} +func (m *ResourceSpans) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceSpans.Unmarshal(m, b) +} +func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic) +} +func (m *ResourceSpans) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSpans.Merge(m, src) +} +func (m *ResourceSpans) XXX_Size() int { + return xxx_messageInfo_ResourceSpans.Size(m) +} +func (m *ResourceSpans) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSpans.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo + +func (m *ResourceSpans) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { + if m != nil { + return m.InstrumentationLibrarySpans + } + return nil +} + +// A collection of Spans produced by an InstrumentationLibrary. +type InstrumentationLibrarySpans struct { + // The instrumentation library information for the spans in this message. 
+ // If this field is not set then no library info is known. + InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` + // A list of Spans that originate from an instrumentation library. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstrumentationLibrarySpans) Reset() { *m = InstrumentationLibrarySpans{} } +func (m *InstrumentationLibrarySpans) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibrarySpans) ProtoMessage() {} +func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{1} +} +func (m *InstrumentationLibrarySpans) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstrumentationLibrarySpans.Unmarshal(m, b) +} +func (m *InstrumentationLibrarySpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstrumentationLibrarySpans.Marshal(b, m, deterministic) +} +func (m *InstrumentationLibrarySpans) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibrarySpans.Merge(m, src) +} +func (m *InstrumentationLibrarySpans) XXX_Size() int { + return xxx_messageInfo_InstrumentationLibrarySpans.Size(m) +} +func (m *InstrumentationLibrarySpans) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibrarySpans.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibrarySpans proto.InternalMessageInfo + +func (m *InstrumentationLibrarySpans) GetInstrumentationLibrary() *v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return nil +} + +func (m *InstrumentationLibrarySpans) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +// Span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Spans may also be linked to other spans +// from the same or different trace and form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next available field id is 17. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. 
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // When null or empty string received - receiver may use string "name" + // as a replacement. There might be smarted algorithms implemented by + // receiver to fix the empty span name. + // + // This field is required. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` + // attributes is a collection of key/value pairs. The value can be a string, + // an integer, a double or the Boolean values `true` or `false`. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. 
Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // events is a collection of Event items. + Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2} +} +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span) GetTraceState() string { + if m != nil { + return m.TraceState + } + return "" +} + +func (m *Span) GetParentSpanId() []byte { + if m != nil { + return m.ParentSpanId + } + return nil +} + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *Span) GetEndTimeUnixNano() uint64 { + if m != nil { + return m.EndTimeUnixNano + } + return 0 +} + +func (m *Span) GetAttributes() []*v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *Span) GetEvents() []*Span_Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *Span) 
GetDroppedEventsCount() uint32 { + if m != nil { + return m.DroppedEventsCount + } + return 0 +} + +func (m *Span) GetLinks() []*Span_Link { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetDroppedLinksCount() uint32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +func (m *Span) GetStatus() *Status { + if m != nil { + return m.Status + } + return nil +} + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type Span_Event struct { + // time_unix_nano is the time the event occurred. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Event) Reset() { *m = Span_Event{} } +func (m *Span_Event) String() string { return proto.CompactTextString(m) } +func (*Span_Event) ProtoMessage() {} +func (*Span_Event) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 0} +} +func (m *Span_Event) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Event.Unmarshal(m, b) +} +func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic) +} +func (m *Span_Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Event.Merge(m, src) +} +func (m *Span_Event) XXX_Size() int { + return xxx_messageInfo_Span_Event.Size(m) +} +func (m *Span_Event) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Event proto.InternalMessageInfo + +func (m *Span_Event) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *Span_Event) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span_Event) GetAttributes() []*v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Event) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. 
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The trace_state associated with the link. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 1} +} +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span_Link) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span_Link) GetTraceState() string { + if m != nil { + return m.TraceState + } + return "" +} + +func (m *Span_Link) GetAttributes() []*v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Link) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // The deprecated status code. This is an optional field. + // + // This field is deprecated and is replaced by the `code` field below. See backward + // compatibility notes below. According to our stability guarantees this field + // will be removed in 12 months, on Oct 22, 2021. All usage of old senders and + // receivers that do not understand the `code` field MUST be phased out by then. + DeprecatedCode Status_DeprecatedStatusCode `protobuf:"varint,1,opt,name=deprecated_code,json=deprecatedCode,proto3,enum=opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode" json:"deprecated_code,omitempty"` // Deprecated: Do not use. + // A developer-facing human readable error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The status code. 
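+ // Informative example with illustrative values: a span that ended in
+ // failure would typically carry
+ //
+ //	st := &Status{
+ //		Code:    Status_STATUS_CODE_ERROR,
+ //		Message: "upstream returned 503",
+ //	}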
+ Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *Status) GetDeprecatedCode() Status_DeprecatedStatusCode { + if m != nil { + return m.DeprecatedCode + } + return Status_DEPRECATED_STATUS_CODE_OK +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetCode() Status_StatusCode { + if m != nil { + return m.Code + } + return Status_STATUS_CODE_UNSET +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode", Status_DeprecatedStatusCode_name, Status_DeprecatedStatusCode_value) + proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value) + proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans") + proto.RegisterType((*InstrumentationLibrarySpans)(nil), "opentelemetry.proto.trace.v1.InstrumentationLibrarySpans") + proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span") + proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event") + proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link") + proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601) +} + +var fileDescriptor_5c407ac9c675a601 = []byte{ + // 1130 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xc1, 0x6e, 0xdb, 0x46, + 0x10, 0x0d, 0x6d, 0x49, 0x76, 0xc6, 0xb6, 0xcc, 0x6c, 0x9d, 0x84, 0x71, 0x9a, 0x46, 0x50, 0xdd, + 0x44, 0x49, 0x1a, 0xa9, 0x49, 0x51, 0x20, 0x05, 0x1a, 0xb4, 0x34, 0xb9, 0x4e, 0x08, 0xd3, 0xa4, + 0xb0, 0x24, 0xdd, 0xb4, 0x97, 0x05, 0x6d, 0x6e, 0x5d, 0x22, 0xd6, 0x52, 0x20, 0x29, 0x23, 0x39, + 0xf4, 0x43, 0x0a, 0xf4, 0x73, 0x0a, 0xf4, 0x0b, 0x7a, 0xe9, 0xa1, 0x5f, 0xd2, 0x43, 0xb1, 0x4b, + 0xca, 0xb2, 0x0c, 0x91, 0xce, 0x25, 0x17, 0x83, 0x7c, 0xf3, 0xde, 0xbc, 0x99, 0x9d, 0x59, 0x8b, + 0xd0, 0x4b, 0xc6, 0x8c, 0xe7, 0xec, 0x94, 0x8d, 0x58, 0x9e, 0xbe, 0x1f, 0x8c, 0xd3, 0x24, 0x4f, + 0x06, 0x79, 0x1a, 0x1e, 0xb3, 0xc1, 0xd9, 0xb3, 0xe2, 0xa1, 0x2f, 0x41, 0xf4, 0xe9, 0x1c, 0xb3, + 0x00, 0xfb, 0x05, 0xe1, 0xec, 0xd9, 0xf6, 0xe3, 0x45, 0x79, 0x8e, 0x93, 0xd1, 0x28, 0xe1, 0x22, + 0x51, 0xf1, 0x54, 0x88, 0xb6, 0xfb, 0x8b, 0xb8, 0x29, 0xcb, 
0x92, 0x49, 0x5a, 0xd8, 0x4e, 0x9f, + 0x0b, 0x7e, 0xf7, 0x6f, 0x05, 0x36, 0x48, 0x09, 0x79, 0xe3, 0x90, 0x67, 0x08, 0xc3, 0xea, 0x94, + 0xa3, 0x29, 0x1d, 0xa5, 0xb7, 0xf6, 0xfc, 0x51, 0x7f, 0x51, 0x79, 0xe7, 0x89, 0xce, 0x9e, 0xf5, + 0xa7, 0x19, 0xc8, 0xb9, 0x14, 0xfd, 0x06, 0xf7, 0x62, 0x9e, 0xe5, 0xe9, 0x64, 0xc4, 0x78, 0x1e, + 0xe6, 0x71, 0xc2, 0xe9, 0x69, 0x7c, 0x94, 0x86, 0xe9, 0x7b, 0x9a, 0x09, 0x1f, 0x6d, 0xa9, 0xb3, + 0xdc, 0x5b, 0x7b, 0xfe, 0x6d, 0xbf, 0xae, 0xf5, 0xbe, 0x35, 0x9f, 0xc2, 0x2e, 0x32, 0xc8, 0x42, + 0xc9, 0xdd, 0xb8, 0x3a, 0xd8, 0xfd, 0x4b, 0x81, 0xbb, 0x35, 0x62, 0xc4, 0xe1, 0x76, 0x45, 0x79, + 0x65, 0xd3, 0xdf, 0x2c, 0x2c, 0xac, 0x3c, 0xeb, 0xca, 0xca, 0xc8, 0xad, 0xc5, 0x45, 0xa1, 0x17, + 0xd0, 0xbc, 0xd8, 0x76, 0xb7, 0xbe, 0x6d, 0x51, 0x23, 0x29, 0x04, 0xdd, 0x3f, 0x00, 0x1a, 0xe2, + 0x1d, 0xdd, 0x81, 0x55, 0x49, 0xa0, 0x71, 0x24, 0x6b, 0x5c, 0x27, 0x2b, 0xf2, 0xdd, 0x8a, 0xd0, + 0x6d, 0x58, 0x11, 0x64, 0x11, 0x59, 0x92, 0x91, 0x96, 0x78, 0xb5, 0x22, 0x74, 0x1f, 0xd6, 0x0a, + 0x4d, 0x96, 0x87, 0x39, 0xd3, 0x96, 0x3b, 0x4a, 0xef, 0x3a, 0x01, 0x09, 0x79, 0x02, 0x41, 0x3b, + 0xd0, 0x1e, 0x87, 0x29, 0xe3, 0x39, 0x9d, 0x26, 0x68, 0xc8, 0x04, 0xeb, 0x05, 0xea, 0x15, 0x69, + 0x10, 0x34, 0x78, 0x38, 0x62, 0x5a, 0x53, 0xea, 0xe5, 0x33, 0xfa, 0x1e, 0x1a, 0x6f, 0x63, 0x1e, + 0x69, 0xad, 0x8e, 0xd2, 0x6b, 0x3f, 0x7f, 0x72, 0x75, 0x43, 0xf2, 0xcf, 0x7e, 0xcc, 0x23, 0x22, + 0x85, 0x68, 0x00, 0x5b, 0x59, 0x1e, 0xa6, 0x39, 0xcd, 0xe3, 0x11, 0xa3, 0x13, 0x1e, 0xbf, 0xa3, + 0x3c, 0xe4, 0x89, 0xb6, 0xd2, 0x51, 0x7a, 0x2d, 0x72, 0x43, 0xc6, 0xfc, 0x78, 0xc4, 0x02, 0x1e, + 0xbf, 0x73, 0x42, 0x9e, 0xa0, 0x27, 0x80, 0x18, 0x8f, 0x2e, 0xd3, 0x57, 0x25, 0x7d, 0x93, 0xf1, + 0x68, 0x8e, 0xfc, 0x0a, 0x20, 0xcc, 0xf3, 0x34, 0x3e, 0x9a, 0xe4, 0x2c, 0xd3, 0xae, 0xcb, 0x53, + 0x7f, 0x78, 0xc5, 0x4c, 0xf7, 0xd9, 0xfb, 0xc3, 0xf0, 0x74, 0xc2, 0xc8, 0x05, 0x29, 0x7a, 0x01, + 0x5a, 0x94, 0x26, 0xe3, 0x31, 0x8b, 0xe8, 0x0c, 0xa5, 0xc7, 0xc9, 0x84, 0xe7, 0x1a, 0x74, 0x94, + 0xde, 0x06, 0xb9, 0x55, 0xc6, 0xf5, 0xf3, 0xb0, 0x21, 0xa2, 0xe8, 0x07, 0x68, 0xb1, 0x33, 0xc6, + 0xf3, 0x4c, 0x5b, 0x93, 0xf6, 0xbd, 0x0f, 0x38, 0x23, 0x2c, 0x04, 0xa4, 0xd4, 0xa1, 0xaf, 0x60, + 0x6b, 0xea, 0x5d, 0x20, 0xa5, 0xef, 0xba, 0xf4, 0x45, 0x65, 0x4c, 0x6a, 0x4a, 0xcf, 0x97, 0xd0, + 0x3c, 0x8d, 0xf9, 0xdb, 0x4c, 0xdb, 0xa8, 0xe9, 0x78, 0xde, 0xd2, 0x8e, 0xf9, 0x5b, 0x52, 0xa8, + 0x50, 0x1f, 0x3e, 0x99, 0x1a, 0x4a, 0xa0, 0xf4, 0x6b, 0x4b, 0xbf, 0x1b, 0x65, 0x48, 0x08, 0x4a, + 0xbb, 0xef, 0xa0, 0x25, 0x36, 0x6b, 0x92, 0x69, 0x9b, 0xf2, 0xd6, 0xec, 0x5c, 0xe1, 0x27, 0xb9, + 0xa4, 0xd4, 0x6c, 0xff, 0xa9, 0x40, 0x53, 0x16, 0x2f, 0xd6, 0xf0, 0xd2, 0x58, 0x15, 0x39, 0xd6, + 0xf5, 0xfc, 0xe2, 0x4c, 0xa7, 0x6b, 0xb8, 0x74, 0x61, 0x0d, 0xe7, 0xe7, 0xbc, 0xfc, 0x71, 0xe6, + 0xdc, 0xa8, 0x9b, 0xf3, 0xf6, 0xbf, 0x0a, 0x34, 0xc4, 0x99, 0x7c, 0x9c, 0x1b, 0x3a, 0xdf, 0x60, + 0xe3, 0xe3, 0x34, 0xd8, 0xac, 0x6b, 0xb0, 0xfb, 0xbb, 0x02, 0xab, 0xd3, 0xcb, 0x8b, 0xee, 0xc0, + 0x4d, 0x6f, 0xa8, 0x3b, 0x74, 0xdf, 0x72, 0x4c, 0x1a, 0x38, 0xde, 0x10, 0x1b, 0xd6, 0x9e, 0x85, + 0x4d, 0xf5, 0x1a, 0xba, 0x05, 0x68, 0x16, 0xb2, 0x1c, 0x1f, 0x13, 0x47, 0xb7, 0x55, 0x05, 0x6d, + 0x81, 0x3a, 0xc3, 0x3d, 0x4c, 0x0e, 0x31, 0x51, 0x97, 0xe6, 0x51, 0xc3, 0xb6, 0xb0, 0xe3, 0xab, + 0xcb, 0xf3, 0x39, 0x86, 0xc4, 0x35, 0x03, 0x03, 0x13, 0xb5, 0x31, 0x8f, 0x1b, 0xae, 0xe3, 0x05, + 0x07, 0x98, 0xa8, 0xcd, 0xee, 0x7f, 0x2b, 0xd0, 0x2a, 0xd6, 0x0a, 0xfd, 0x02, 0x9b, 0x11, 0x1b, + 0xa7, 0xec, 0x38, 0xcc, 0x59, 0x44, 0x8f, 0x93, 0xa8, 0xf8, 0x01, 0x6b, 0x5f, 0xf5, 
0x23, 0x53, + 0xc8, 0xfb, 0xe6, 0xb9, 0xb6, 0x00, 0x8c, 0x24, 0x62, 0xbb, 0x4b, 0x9a, 0x42, 0xda, 0xb3, 0xac, + 0x02, 0x43, 0x1a, 0xac, 0x8c, 0x58, 0x96, 0x85, 0x27, 0xd3, 0x4d, 0x9c, 0xbe, 0x22, 0x03, 0x1a, + 0xd2, 0x76, 0x59, 0xda, 0x0e, 0x3e, 0xc8, 0x76, 0x66, 0x46, 0xa4, 0xb8, 0xfb, 0x4f, 0x13, 0xb6, + 0x16, 0xd5, 0x82, 0xee, 0xc1, 0x1d, 0x13, 0x0f, 0x09, 0x36, 0x74, 0x1f, 0x9b, 0xd4, 0xf3, 0x75, + 0x3f, 0xf0, 0xa8, 0xe1, 0x9a, 0x98, 0xba, 0xfb, 0xea, 0x35, 0xb4, 0x03, 0x9d, 0x8a, 0xb0, 0xa1, + 0x3b, 0x06, 0xb6, 0x6d, 0x6c, 0xaa, 0x0a, 0xea, 0xc1, 0x4e, 0x05, 0x2b, 0x70, 0xf6, 0x1d, 0xf7, + 0x47, 0x87, 0x62, 0x42, 0x5c, 0x31, 0x9f, 0x27, 0xf0, 0xb0, 0x82, 0x69, 0x39, 0x87, 0xba, 0x6d, + 0x99, 0x54, 0x27, 0xaf, 0x82, 0x83, 0x62, 0x6c, 0x5f, 0x42, 0xaf, 0x82, 0x6c, 0x62, 0xdd, 0xb4, + 0x2d, 0x07, 0x53, 0xfc, 0xc6, 0xc0, 0xd8, 0xc4, 0xa6, 0xda, 0xa8, 0x29, 0xd5, 0x71, 0x7d, 0xba, + 0xe7, 0x06, 0x8e, 0xa9, 0x36, 0xd1, 0x23, 0xf8, 0xa2, 0x82, 0xa5, 0xdb, 0x04, 0xeb, 0xe6, 0x4f, + 0x14, 0xbf, 0xb1, 0x3c, 0xdf, 0x53, 0x5b, 0x35, 0xf6, 0x43, 0x4c, 0x0e, 0x2c, 0xcf, 0xb3, 0x5c, + 0x87, 0x9a, 0xd8, 0x11, 0x7b, 0xba, 0x82, 0x9e, 0xc2, 0xa3, 0x0a, 0x36, 0xc1, 0x9e, 0x1b, 0x10, + 0x43, 0x14, 0xfb, 0x5a, 0x0f, 0x3c, 0x1f, 0x9b, 0xea, 0x2a, 0xea, 0xc3, 0xe3, 0x0a, 0xfa, 0x9e, + 0x6e, 0xd9, 0x58, 0xac, 0x29, 0x36, 0x5c, 0xc7, 0xb4, 0x7c, 0xcb, 0x75, 0xd4, 0xeb, 0xa8, 0x0b, + 0x9f, 0x55, 0xd5, 0xbd, 0xeb, 0x12, 0x91, 0x13, 0xd0, 0x43, 0xf8, 0xbc, 0x6a, 0x96, 0x81, 0x4f, + 0xdd, 0x3d, 0x4a, 0x74, 0xe7, 0x15, 0x56, 0xd7, 0x6a, 0xe7, 0x65, 0x1d, 0x0c, 0x6d, 0x2c, 0x06, + 0x80, 0x4d, 0x75, 0xbd, 0xe6, 0xb8, 0xa6, 0x57, 0xb1, 0x1c, 0xed, 0x06, 0x7a, 0x00, 0xdd, 0xca, + 0xa4, 0xfa, 0xa1, 0x6e, 0xd9, 0xfa, 0xae, 0x8d, 0xd5, 0x76, 0xcd, 0x9c, 0x4c, 0xdd, 0xd7, 0xa9, + 0xed, 0x7a, 0x9e, 0xba, 0x89, 0x1e, 0xc3, 0x83, 0xea, 0x6c, 0x81, 0xff, 0x1a, 0x3b, 0xbe, 0x25, + 0x63, 0xaa, 0xda, 0x75, 0x00, 0x2e, 0x6c, 0xf4, 0x4d, 0xb8, 0x31, 0x4f, 0xf7, 0xb0, 0xaf, 0x5e, + 0x43, 0x08, 0xda, 0x97, 0xb6, 0x5b, 0xb9, 0x4c, 0x2d, 0x97, 0x74, 0x97, 0xc3, 0xfd, 0x38, 0xa9, + 0xbd, 0x67, 0xbb, 0xe0, 0x8b, 0xa7, 0xa1, 0x00, 0x87, 0xca, 0xcf, 0x2f, 0x4f, 0xe2, 0xfc, 0xd7, + 0xc9, 0x91, 0xf8, 0x67, 0x39, 0x10, 0xb2, 0xa7, 0xb3, 0x8f, 0xe5, 0xb9, 0x2c, 0x4f, 0x8b, 0x4f, + 0xe7, 0x13, 0xc6, 0x07, 0x27, 0xb3, 0xaf, 0xf6, 0xa3, 0x96, 0x84, 0xbf, 0xfe, 0x3f, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x4b, 0xfa, 0x64, 0xdc, 0x0b, 0x00, 0x00, +} diff --git a/route/route.go b/route/route.go index 3dbdcf93ff..eca2635ac1 100644 --- a/route/route.go +++ b/route/route.go @@ -4,12 +4,15 @@ import ( "bytes" "compress/gzip" "context" + "encoding/binary" + "encoding/hex" "encoding/json" "errors" "fmt" "io" "io/ioutil" "math" + "net" "net/http" "strconv" "sync" @@ -19,6 +22,9 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/vmihailenco/msgpack/v4" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" "github.com/honeycombio/refinery/collect" "github.com/honeycombio/refinery/config" @@ -27,13 +33,21 @@ import ( "github.com/honeycombio/refinery/sharder" "github.com/honeycombio/refinery/transmit" "github.com/honeycombio/refinery/types" + + collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" + common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" ) const ( // numZstdDecoders is set statically here - we may 
make it into a config option // A normal practice might be to use some multiple of the CPUs, but that goes south // in kubernetes - numZstdDecoders = 4 + numZstdDecoders = 4 + traceIDShortLength = 8 + traceIDLongLength = 16 + GRPCMessageSizeMax int = 5000000 // 5MB + defaultSampleRate = 1 ) type Router struct { @@ -61,8 +75,9 @@ type Router struct { zstdDecoders chan *zstd.Decoder - server *http.Server - doneWG sync.WaitGroup + server *http.Server + grpcServer *grpc.Server + doneWG sync.WaitGroup } type BatchResponse struct { @@ -145,13 +160,19 @@ func (r *Router) LnS(incomingOrPeer string) { // pass everything else through unmolested muxxer.PathPrefix("/").HandlerFunc(r.proxy).Name("proxy") - var listenAddr string + var listenAddr, grpcAddr string if r.incomingOrPeer == "incoming" { listenAddr, err = r.Config.GetListenAddr() if err != nil { r.iopLogger.Error().Logf("failed to get listen addr config: %s", err) return } + // GRPC listen addr is optional, err means addr was not empty and invalid + grpcAddr, err = r.Config.GetGRPCListenAddr() + if err != nil { + r.iopLogger.Error().Logf("failed to get grpc listen addr config: %s", err) + return + } } else { listenAddr, err = r.Config.GetPeerListenAddr() if err != nil { @@ -166,6 +187,27 @@ func (r *Router) LnS(incomingOrPeer string) { Handler: muxxer, } + if len(grpcAddr) > 0 { + l, err := net.Listen("tcp", grpcAddr) + if err != nil { + r.iopLogger.Error().Logf("failed to listen to grpc addr: " + grpcAddr) + } + + r.iopLogger.Info().Logf("gRPC listening on %s", grpcAddr) + serverOpts := []grpc.ServerOption{ + grpc.MaxSendMsgSize(GRPCMessageSizeMax), // default is math.MaxInt32 + grpc.MaxRecvMsgSize(GRPCMessageSizeMax), // default is 4MB + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 10 * time.Second, + Timeout: 2 * time.Second, + MaxConnectionIdle: time.Minute, + }), + } + r.grpcServer = grpc.NewServer(serverOpts...) 
+ collectortrace.RegisterTraceServiceServer(r.grpcServer, r) + go r.grpcServer.Serve(l) + } + r.doneWG.Add(1) go func() { defer r.doneWG.Done() @@ -183,6 +225,9 @@ func (r *Router) Stop() error { if err != nil { return err } + if r.grpcServer != nil { + r.grpcServer.GracefulStop() + } r.doneWG.Wait() return nil } @@ -337,6 +382,110 @@ func (r *Router) batch(w http.ResponseWriter, req *http.Request) { w.Write(response) } +func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { + + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + r.Logger.Error().Logf("Unable to retrieve metadata from OTLP request.") + return &collectortrace.ExportTraceServiceResponse{}, nil + } + + // requestID is used to track a request as it moves between refinery nodes (peers). + // The OTLP handler only receives incoming (not peer) requests for now, so it will be empty here. + var requestID types.RequestIDContextKey + debugLog := r.iopLogger.Debug().WithField("request_id", requestID) + + apiKey, dataset := getAPIKeyAndDatasetFromMetadata(md) + if apiKey == "" { + r.Logger.Error().Logf("Received OTLP request without Honeycomb APIKey header") + return &collectortrace.ExportTraceServiceResponse{}, nil + } + if dataset == "" { + r.Logger.Error().Logf("Received OTLP request without Honeycomb dataset header") + return &collectortrace.ExportTraceServiceResponse{}, nil + } + + apiHost, err := r.Config.GetHoneycombAPI() + if err != nil { + r.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") + return &collectortrace.ExportTraceServiceResponse{}, nil + } + + for _, resourceSpan := range req.ResourceSpans { + resourceAttrs := make(map[string]interface{}) + + if resourceSpan.Resource != nil { + addAttributesToMap(resourceAttrs, resourceSpan.Resource.Attributes) + } + + for _, librarySpan := range resourceSpan.InstrumentationLibrarySpans { + library := librarySpan.InstrumentationLibrary + if library != nil { + if len(library.Name) > 0 { + resourceAttrs["library.name"] = library.Name + } + if len(library.Version) > 0 { + resourceAttrs["library.version"] = library.Version + } + } + + for _, span := range librarySpan.GetSpans() { + traceID := bytesToTraceID(span.TraceId) + spanID := hex.EncodeToString(span.SpanId) + timestamp := time.Unix(0, int64(span.StartTimeUnixNano)).UTC() + + eventAttrs := map[string]interface{}{ + "trace.trace_id": traceID, + "trace.span_id": spanID, + "type": getSpanKind(span.Kind), + "name": span.Name, + "duration_ms": float64(span.EndTimeUnixNano-span.StartTimeUnixNano) / float64(time.Millisecond), + "status_code": int32(span.Status.Code), + } + if span.ParentSpanId != nil { + eventAttrs["trace.parent_id"] = hex.EncodeToString(span.ParentSpanId) + } + if r.getSpanStatusCode(span.Status) == trace.Status_STATUS_CODE_ERROR { + eventAttrs["error"] = true + } + if len(span.Status.Message) > 0 { + eventAttrs["status_message"] = span.Status.Message + } + if span.Attributes != nil { + addAttributesToMap(eventAttrs, span.Attributes) + } + + sampleRate, err := getSampleRateFromAttributes(eventAttrs) + if err != nil { + debugLog.WithField("error", err.Error()).WithField("sampleRate", eventAttrs["sampleRate"]).Logf("error parsing sampleRate") + } + + // copy resource attributes to event attributes + for k, v := range resourceAttrs { + eventAttrs[k] = v + } + + event := &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: dataset, + SampleRate: uint(sampleRate), +
Timestamp: timestamp, + Data: eventAttrs, + } + + err = r.processEvent(event, requestID) + if err != nil { + r.Logger.Error().Logf("Error processing event: " + err.Error()) + } + } + } + } + + return &collectortrace.ExportTraceServiceResponse{}, nil +} + func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { debugLog := r.iopLogger.Debug(). WithField("request_id", reqID). @@ -553,3 +702,123 @@ func unmarshal(r *http.Request, data io.Reader, v interface{}) error { return jsoniter.NewDecoder(data).Decode(v) } } + +func getAPIKeyAndDatasetFromMetadata(md metadata.MD) (apiKey string, dataset string) { + apiKey = getFirstValueFromMetadata(types.APIKeyHeader, md) + if apiKey == "" { + apiKey = getFirstValueFromMetadata(types.APIKeyHeaderShort, md) + } + dataset = getFirstValueFromMetadata(types.DatasetHeader, md) + + return apiKey, dataset +} + +// getFirstValueFromMetadata returns the first value of a metadata entry using a +// case-insensitive key +func getFirstValueFromMetadata(key string, md metadata.MD) string { + if values := md.Get(key); len(values) > 0 { + return values[0] + } + return "" +} + +func addAttributesToMap(attrs map[string]interface{}, attributes []*common.KeyValue) { + for _, attr := range attributes { + if attr.Key == "" { + continue + } + switch attr.Value.Value.(type) { + case *common.AnyValue_StringValue: + attrs[attr.Key] = attr.Value.GetStringValue() + case *common.AnyValue_BoolValue: + attrs[attr.Key] = attr.Value.GetBoolValue() + case *common.AnyValue_DoubleValue: + attrs[attr.Key] = attr.Value.GetDoubleValue() + case *common.AnyValue_IntValue: + attrs[attr.Key] = attr.Value.GetIntValue() + } + } +} + +func getSpanKind(kind trace.Span_SpanKind) string { + switch kind { + case trace.Span_SPAN_KIND_CLIENT: + return "client" + case trace.Span_SPAN_KIND_SERVER: + return "server" + case trace.Span_SPAN_KIND_PRODUCER: + return "producer" + case trace.Span_SPAN_KIND_CONSUMER: + return "consumer" + case trace.Span_SPAN_KIND_INTERNAL: + return "internal" + case trace.Span_SPAN_KIND_UNSPECIFIED: + fallthrough + default: + return "unspecified" + } +} + +// bytesToTraceID returns an ID suitable for use for spans and traces. Before +// encoding the bytes as a hex string, we want to handle cases where we are +// given 128-bit IDs with zero padding, e.g. 0000000000000000f798a1e7f33c8af6. +// To do this, we borrow a strategy from Jaeger [1] wherein we split the byte +// sequence into two parts. The leftmost part could contain all zeros. We use +// that to determine whether to return a 64-bit hex encoded string or a 128-bit +// one. +// +// [1]: https://github.com/jaegertracing/jaeger/blob/cd19b64413eca0f06b61d92fe29bebce1321d0b0/model/ids.go#L81 +func bytesToTraceID(traceID []byte) string { + // binary.BigEndian.Uint64() does a bounds check on traceID which will + // cause a panic if traceID is fewer than 8 bytes. In this case, we don't + // need to check for zero padding on the high part anyway, so just return a + // hex string. 
+ if len(traceID) < traceIDShortLength { + return fmt.Sprintf("%x", traceID) + } + var low uint64 + if len(traceID) == traceIDLongLength { + low = binary.BigEndian.Uint64(traceID[traceIDShortLength:]) + if high := binary.BigEndian.Uint64(traceID[:traceIDShortLength]); high != 0 { + return fmt.Sprintf("%016x%016x", high, low) + } + } else { + low = binary.BigEndian.Uint64(traceID) + } + + return fmt.Sprintf("%016x", low) +} + +// getSpanStatusCode checks the value of both the deprecated code and code fields +// on the span status and using the rules specified in the backward compatibility +// notes in the protobuf definitions. See: +// +// https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L230 +func (r *Router) getSpanStatusCode(status *trace.Status) trace.Status_StatusCode { + if status.Code == trace.Status_STATUS_CODE_UNSET { + if status.DeprecatedCode == trace.Status_DEPRECATED_STATUS_CODE_OK { + return trace.Status_STATUS_CODE_UNSET + } + return trace.Status_STATUS_CODE_ERROR + } + return status.Code +} + +func getSampleRateFromAttributes(attributes map[string]interface{}) (int, error) { + var err error + sampleRate := defaultSampleRate + if attributes["sampleRate"] != nil { + switch attributes["sampleRate"].(type) { + case string: + sampleRate, err = strconv.Atoi(attributes["sampleRate"].(string)) + case int: + sampleRate = attributes["sampleRate"].(int) + default: + err = fmt.Errorf("Unrecognised sampleRate datatype - %T", attributes["sampleRate"]) + } + // remove sampleRate from event fields + delete(attributes, "sampleRate") + } + + return sampleRate, err +} diff --git a/route/route_test.go b/route/route_test.go index c4e75a94ca..8a5a5f754b 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -18,6 +18,7 @@ import ( "github.com/honeycombio/refinery/sharder" "github.com/klauspost/compress/zstd" "github.com/vmihailenco/msgpack/v4" + "google.golang.org/grpc/metadata" ) func TestDecompression(t *testing.T) { @@ -237,6 +238,128 @@ func TestUnmarshal(t *testing.T) { } } +func TestGetAPIKeyAndDatasetFromMetadataCaseInsensitive(t *testing.T) { + const ( + apiKeyValue = "test-apikey" + datasetValue = "test-dataset" + ) + + tests := []struct { + name string + apikeyHeader string + datasetHeader string + }{ + { + name: "lowercase", + apikeyHeader: "x-honeycomb-team", + datasetHeader: "x-honeycomb-dataset", + }, + { + name: "uppercase", + apikeyHeader: "X-HONEYCOMB-TEAM", + datasetHeader: "X-HONEYCOMB-DATASET", + }, + { + name: "mixed-case", + apikeyHeader: "x-HoNeYcOmB-tEaM", + datasetHeader: "X-hOnEyCoMb-DaTaSeT", + }, + { + name: "lowercase-short", + apikeyHeader: "x-hny-team", + datasetHeader: "x-honeycomb-dataset", + }, + { + name: "uppercase-short", + apikeyHeader: "X-HNY-TEAM", + datasetHeader: "X-HONEYCOMB-DATASET", + }, + { + name: "mixed-case-short", + apikeyHeader: "X-hNy-TeAm", + datasetHeader: "X-hOnEyCoMb-DaTaSeT", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + md := metadata.MD{} + md.Set(tt.apikeyHeader, apiKeyValue) + md.Set(tt.datasetHeader, datasetValue) + + apikey, dataset := getAPIKeyAndDatasetFromMetadata(md) + if apikey != apiKeyValue { + t.Errorf("got: %s\n\twant: %v", apikey, apiKeyValue) + } + if dataset != datasetValue { + t.Errorf("got: %s\n\twant: %v", dataset, datasetValue) + } + }) + } +} + +func TestGetSampleRateFromAttributes(t *testing.T) { + const ( + defaultSampleRate = 1 + ) + tests := []struct { + name string + attrKey string 
+ attrValue interface{} + expectedValue int + }{ + { + name: "missing attr gets default value", + attrKey: "", + attrValue: nil, + expectedValue: defaultSampleRate, + }, + { + name: "can parse integer value", + attrKey: "sampleRate", + attrValue: 5, + expectedValue: 5, + }, + { + name: "can parse string value", + attrKey: "sampleRate", + attrValue: "5", + expectedValue: 5, + }, + { + name: "does not parse float, gets default value", + attrKey: "sampleRate", + attrValue: 0.25, + expectedValue: defaultSampleRate, + }, + { + name: "does not parse bool, gets default value", + attrKey: "sampleRate", + attrValue: true, + expectedValue: defaultSampleRate, + }, + { + name: "does not parse struct, gets default value", + attrKey: "sampleRate", + attrValue: struct{}{}, + expectedValue: defaultSampleRate, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + attrs := map[string]interface{}{ + tt.attrKey: tt.attrValue, + } + + sampleRate, _ := getSampleRateFromAttributes(attrs) + if sampleRate != tt.expectedValue { + t.Errorf("got: %d\n\twant: %d", sampleRate, tt.expectedValue) + } + }) + } +} + func TestDebugTrace(t *testing.T) { req, _ := http.NewRequest("GET", "/debug/trace/123abcdef", nil) req = mux.SetURLVars(req, map[string]string{"traceID": "123abcdef"}) diff --git a/types/event.go b/types/event.go index 0ef2157889..0a1115155c 100644 --- a/types/event.go +++ b/types/event.go @@ -9,6 +9,7 @@ const ( APIKeyHeader = "X-Honeycomb-Team" // libhoney-js uses this APIKeyHeaderShort = "X-Hny-Team" + DatasetHeader = "X-Honeycomb-Dataset" SampleRateHeader = "X-Honeycomb-Samplerate" TimestampHeader = "X-Honeycomb-Event-Time" )
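
For illustration only, not part of the patch: a minimal client sketch showing how the new gRPC endpoint could be exercised once GRPCListenAddr is configured. The address (localhost:9090), API key, dataset, and span values below are placeholders; the TraceServiceClient and the OTLP message types are the ones generated into the vendored internal/opentelemetry-proto-gen packages added by this change.

package main

import (
    "context"
    "encoding/hex"
    "log"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/metadata"

    collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1"
    common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1"
    trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1"
)

func main() {
    // Dial the address configured as GRPCListenAddr (assumed here to be localhost:9090).
    // The listener expects unencrypted traffic, hence the insecure dial option.
    conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
    client := collectortrace.NewTraceServiceClient(conn)

    // The Export handler reads the API key and dataset from gRPC metadata using the
    // same (case-insensitive) names as the HTTP headers in types/event.go.
    ctx := metadata.AppendToOutgoingContext(context.Background(),
        "x-honeycomb-team", "YOUR_API_KEY", // placeholder API key
        "x-honeycomb-dataset", "your-dataset", // placeholder dataset
    )

    traceID, _ := hex.DecodeString("463ac35c9f6413ad48485a3953bb6124")
    spanID, _ := hex.DecodeString("a2fb4a1d1a96d312")
    start := uint64(time.Now().UnixNano())

    req := &collectortrace.ExportTraceServiceRequest{
        ResourceSpans: []*trace.ResourceSpans{{
            InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{
                Spans: []*trace.Span{{
                    TraceId:           traceID,
                    SpanId:            spanID,
                    Name:              "example-span",
                    Kind:              trace.Span_SPAN_KIND_SERVER,
                    StartTimeUnixNano: start,
                    EndTimeUnixNano:   start + uint64(5*time.Millisecond),
                    // Status is populated because the handler reads span.Status directly.
                    Status: &trace.Status{Code: trace.Status_STATUS_CODE_UNSET},
                    Attributes: []*common.KeyValue{{
                        Key:   "http.route",
                        Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "/widgets"}},
                    }},
                }},
            }},
        }},
    }

    if _, err := client.Export(ctx, req); err != nil {
        log.Fatalf("export: %v", err)
    }
}

Because the server is created with MaxRecvMsgSize and MaxSendMsgSize set to GRPCMessageSizeMax (5MB), batches larger than that are rejected; a client producing bigger payloads would need to split them across several Export calls.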
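
Also for illustration, not part of the patch: the trace ID normalization performed by bytesToTraceID, shown with concrete inputs. The helper is unexported from the route package, so the hypothetical normalizeTraceID below copies its logic; the expected outputs follow the Jaeger-style split described in the function's comment.

package main

import (
    "encoding/binary"
    "encoding/hex"
    "fmt"
)

// normalizeTraceID mirrors the unexported bytesToTraceID helper in route.go:
// IDs shorter than 8 bytes are hex-encoded as-is; 16-byte IDs with an all-zero
// high half collapse to their 64-bit form, otherwise they keep all 128 bits;
// any other length is encoded from its first 8 bytes.
func normalizeTraceID(id []byte) string {
    if len(id) < 8 {
        return fmt.Sprintf("%x", id)
    }
    var low uint64
    if len(id) == 16 {
        low = binary.BigEndian.Uint64(id[8:])
        if high := binary.BigEndian.Uint64(id[:8]); high != 0 {
            return fmt.Sprintf("%016x%016x", high, low)
        }
    } else {
        low = binary.BigEndian.Uint64(id)
    }
    return fmt.Sprintf("%016x", low)
}

func main() {
    padded, _ := hex.DecodeString("0000000000000000f798a1e7f33c8af6")
    full, _ := hex.DecodeString("463ac35c9f6413ad48485a3953bb6124")

    fmt.Println(normalizeTraceID(padded)) // f798a1e7f33c8af6
    fmt.Println(normalizeTraceID(full))   // 463ac35c9f6413ad48485a3953bb6124
}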