diff --git a/.travis.yml b/.travis.yml index b167d95..35256d2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,6 @@ language: go go: - - 1.6.4 - 1.9.2 - tip diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index c957e2c..91df038 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -10,8 +10,8 @@ }, { "ImportPath": "github.com/Shopify/sarama", - "Comment": "v1.8.0-27-g68920ff", - "Rev": "68920ff1be3bbd31449ab1e2c255c50cedeb755c" + "Comment": "v1.15.0-19-gf933fb4", + "Rev": "f933fb4b0b80ff5518a04dd6764182f39e8163c1" }, { "ImportPath": "github.com/cloudfoundry-incubator/uaago", @@ -99,6 +99,24 @@ "ImportPath": "github.com/cloudfoundry/noaa/consumer/internal", "Comment": "v2.0.0-16-ga1d4a94", "Rev": "a1d4a94e2be6711111893c59538976d07ef7ea98" + }, + { + "ImportPath": "github.com/eapache/go-xerial-snappy", + "Rev": "bb955e01b9346ac19dc29eb16586c90ded99a98c" + }, + { + "ImportPath": "github.com/pierrec/lz4", + "Comment": "v1.1", + "Rev": "2fcda4cb7018ce05a25959d2fe08c83e3329f169" + }, + { + "ImportPath": "github.com/rcrowley/go-metrics", + "Rev": "e181e095bae94582363434144c61a9653aff6e50" + }, + { + "ImportPath": "github.com/pierrec/xxHash/xxHash32", + "Comment": "v0.1-11-ga0006b1", + "Rev": "a0006b13c722f7f12368c00a3d3c2ae8a999a0c6" } ] } diff --git a/config.go b/config.go index da2e4a3..ef995c2 100644 --- a/config.go +++ b/config.go @@ -45,6 +45,26 @@ type Kafka struct { RepartitionMax int `toml:"repartition_max"` Compression string `toml:"compression"` // ("gzip", "snappy" or "none", default: "none") + + // EnableTLS, if set, will connect to Kafka with TLS instead of plaintext. + EnableTLS bool `toml:"enable_tls"` + + // CACerts is a list of CAs certificates used to verify the host. + // Usually there is only one, however multiple can be specified to allow + // for rotation. These should be PEM encoded CERTIFICATEs. + // If none are specified, then the system CA pool is used. + // Ignored unless enable_tls is set. + CACerts []string `toml:"ca_certificates"` + + // ClientKey is used with the client certificate to identify this client + // to Kafka. This should be a PEM encoded RSA PRIVATE KEY. + // Ignored unless enable_tls is set. + ClientKey string `toml:"private_key"` + + // ClientCertificate is used with the client key to identify this client + // to Kafka. This should be a PEM encoded CERTIFICATE. + // Ignored unless enable_tls is set. + ClientCert string `toml:"certificate"` } type Topic struct { diff --git a/kafka.go b/kafka.go index 5be8355..1c8abed 100644 --- a/kafka.go +++ b/kafka.go @@ -1,7 +1,10 @@ package main import ( + "crypto/tls" + "crypto/x509" "encoding/binary" + "errors" "fmt" "log" "sync" @@ -27,6 +30,43 @@ func NewKafkaProducer(logger *log.Logger, stats *Stats, config *Config) (NozzleP // TODO (tcnksm): Enable to configure more properties. 
producerConfig := sarama.NewConfig() + if config.Kafka.EnableTLS { + if config.Kafka.ClientCert == "" { + return nil, errors.New("please specify client_certificate") + } + if config.Kafka.ClientKey == "" { + return nil, errors.New("please specify private_key") + } + + producerConfig.Net.TLS.Enable = true + if producerConfig.Net.TLS.Config == nil { + producerConfig.Net.TLS.Config = &tls.Config{} + } + + if len(config.Kafka.CACerts) == 0 { + var err error + producerConfig.Net.TLS.Config.RootCAs, err = x509.SystemCertPool() + if err != nil { + return nil, err + } + } else { + producerConfig.Net.TLS.Config.RootCAs = x509.NewCertPool() + for _, certString := range config.Kafka.CACerts { + if !producerConfig.Net.TLS.Config.RootCAs.AppendCertsFromPEM([]byte(certString)) { + return nil, errors.New("no certs in ca pem") + } + } + } + + cert, err := tls.X509KeyPair([]byte(config.Kafka.ClientCert), []byte(config.Kafka.ClientKey)) + if err != nil { + return nil, err + } + + producerConfig.Net.TLS.Config.Certificates = []tls.Certificate{cert} + producerConfig.Net.TLS.Config.BuildNameToCertificate() + } + producerConfig.Producer.Partitioner = sarama.NewRoundRobinPartitioner producerConfig.Producer.Return.Successes = true producerConfig.Producer.RequiredAcks = sarama.WaitForAll diff --git a/tls_test.go b/tls_test.go new file mode 100644 index 0000000..4c4764b --- /dev/null +++ b/tls_test.go @@ -0,0 +1,273 @@ +package main + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "net" + "testing" + "time" + + "github.com/Shopify/sarama" +) + +func TestTLS(t *testing.T) { + _, err := NewKafkaProducer(nil, NewStats(), &Config{ + Kafka: Kafka{ + EnableTLS: true, + ClientCert: "", + ClientKey: "", + }, + }) + if err == nil || err.Error() != "please specify client_certificate" { + t.Fatal("expected fail:", err) + } + _, err = NewKafkaProducer(nil, NewStats(), &Config{ + Kafka: Kafka{ + EnableTLS: true, + ClientCert: "foo", + ClientKey: "", + }, + }) + if err == nil || err.Error() != "please specify private_key" { + t.Fatal("expected fail:", err) + } + _, err = NewKafkaProducer(nil, NewStats(), &Config{ + Kafka: Kafka{ + EnableTLS: true, + ClientCert: "foo", + ClientKey: "bar", + }, + }) + if err == nil || err.Error() != "tls: failed to find any PEM data in certificate input" { + t.Fatal("expected fail:", err) + } + + cakey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + clientkey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + hostkey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + nvb := time.Now().Add(-1 * time.Hour) + nva := time.Now().Add(1 * time.Hour) + + caTemplate := &x509.Certificate{ + Subject: pkix.Name{CommonName: "ca"}, + Issuer: pkix.Name{CommonName: "ca"}, + SerialNumber: big.NewInt(0), + NotAfter: nva, + NotBefore: nvb, + IsCA: true, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign, + } + caDer, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &cakey.PublicKey, cakey) + if err != nil { + t.Fatal(err) + } + caFinalCert, err := x509.ParseCertificate(caDer) + if err != nil { + t.Fatal(err) + } + + hostDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{ + Subject: pkix.Name{CommonName: "host"}, + Issuer: pkix.Name{CommonName: "ca"}, + IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)}, + SerialNumber: big.NewInt(0), + NotAfter: nva, + NotBefore: nvb, + ExtKeyUsage: 
[]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, caFinalCert, &hostkey.PublicKey, cakey) + if err != nil { + t.Fatal(err) + } + + clientDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{ + Subject: pkix.Name{CommonName: "client"}, + Issuer: pkix.Name{CommonName: "ca"}, + SerialNumber: big.NewInt(0), + NotAfter: nva, + NotBefore: nvb, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, caFinalCert, &clientkey.PublicKey, cakey) + if err != nil { + t.Fatal(err) + } + + clientCertPem := string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: clientDer, + })) + clientKeyPem := string(pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(clientkey), + })) + + hostCertPem := string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: hostDer, + })) + hostKeyPem := string(pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(hostkey), + })) + + _, err = NewKafkaProducer(nil, NewStats(), &Config{ + Kafka: Kafka{ + EnableTLS: true, + ClientCert: clientCertPem, + ClientKey: clientKeyPem, + }, + }) + if err == nil || err.Error() != "brokers are not provided" { + t.Fatal("expected fail:", err) + } + + caPem := string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: caDer, + })) + + pool := x509.NewCertPool() + pool.AddCert(caFinalCert) + + // Fail with system CAs + doListenerTLSTest(t, false, &tls.Config{ + Certificates: []tls.Certificate{tls.Certificate{ + Certificate: [][]byte{hostDer}, + PrivateKey: hostkey, + }}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: pool, + }, func(addr string) { + _, err = NewKafkaProducer(nil, NewStats(), &Config{ + Kafka: Kafka{ + EnableTLS: true, + ClientCert: clientCertPem, + ClientKey: clientKeyPem, + Brokers: []string{addr}, + Topic: Topic{ + LogMessage: "foo", + }, + }, + }) + if err == nil { + t.Fatal("Should fail as we have the wrong system CA") + } + }) + + // Fail with no TLS + doListenerTLSTest(t, false, &tls.Config{ + Certificates: []tls.Certificate{tls.Certificate{ + Certificate: [][]byte{hostDer}, + PrivateKey: hostkey, + }}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: pool, + }, func(addr string) { + _, err = NewKafkaProducer(nil, NewStats(), &Config{ + Kafka: Kafka{ + EnableTLS: false, + Brokers: []string{addr}, + Topic: Topic{ + LogMessage: "foo", + }, + }, + }) + if err == nil { + t.Fatal("Should fail as we have the wrong system CA") + } + }) + + // Fail with wrong key for cert + doListenerTLSTest(t, false, &tls.Config{ + Certificates: []tls.Certificate{tls.Certificate{ + Certificate: [][]byte{hostDer}, + PrivateKey: hostkey, + }}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: pool, + }, func(addr string) { + _, err = NewKafkaProducer(nil, NewStats(), &Config{ + Kafka: Kafka{ + EnableTLS: true, + ClientCert: hostCertPem, + ClientKey: hostKeyPem, + CACerts: []string{caPem}, + Brokers: []string{addr}, + Topic: Topic{ + LogMessage: "foo", + }, + }, + }) + if err == nil { + t.Fatal("wrong type of cert") + } + }) + + // Try to actually work + doListenerTLSTest(t, true, &tls.Config{ + Certificates: []tls.Certificate{tls.Certificate{ + Certificate: [][]byte{hostDer}, + PrivateKey: hostkey, + }}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: pool, + }, func(addr string) { + _, err = NewKafkaProducer(nil, NewStats(), &Config{ + Kafka: Kafka{ + EnableTLS: true, + ClientCert: clientCertPem, + ClientKey: clientKeyPem, + CACerts: []string{caPem}, + Brokers: 
[]string{addr}, + Topic: Topic{ + LogMessage: "foo", + }, + }, + }) + if err != nil { + t.Fatal("Expecting to work:", err) + } + }) +} + +func doListenerTLSTest(t *testing.T, willWork bool, tlsConf *tls.Config, f func(addr string)) { + //sarama.Logger = log.New(os.Stderr, "", log.LstdFlags) + + seedListener, err := tls.Listen("tcp", "127.0.0.1:0", tlsConf) + if err != nil { + t.Fatal("cannot open listener", err) + } + + var childT *testing.T + if willWork { + childT = t + } else { + childT = &testing.T{} // we want to swallow errors + } + + seed := sarama.NewMockBrokerListener(childT, int32(0), seedListener) + defer seed.Close() + + if willWork { + seed.Returns(new(sarama.MetadataResponse)) + } + + f(seed.Addr()) +} diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore index 3591f9f..c6c482d 100644 --- a/vendor/github.com/Shopify/sarama/.gitignore +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -22,3 +22,5 @@ _cgo_export.* _testmain.go *.exe + +coverage.txt diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml index f7c8855..cc38769 100644 --- a/vendor/github.com/Shopify/sarama/.travis.yml +++ b/vendor/github.com/Shopify/sarama/.travis.yml @@ -1,8 +1,7 @@ language: go go: -- 1.4.3 -- 1.5.3 -- 1.6 +- 1.8.x +- 1.9.x env: global: @@ -12,8 +11,9 @@ env: - KAFKA_HOSTNAME=localhost - DEBUG=true matrix: - - KAFKA_VERSION=0.8.2.2 - - KAFKA_VERSION=0.9.0.1 + - KAFKA_VERSION=0.10.2.1 + - KAFKA_VERSION=0.11.0.2 + - KAFKA_VERSION=1.0.0 before_install: - export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} @@ -21,8 +21,7 @@ before_install: - vagrant/boot_cluster.sh - vagrant/create_topics.sh -install: -- make install_dependencies +install: make install_dependencies script: - make test @@ -30,4 +29,7 @@ script: - make errcheck - make fmt -sudo: false +after_success: +- bash <(curl -s https://codecov.io/bash) + +after_script: vagrant/halt_cluster.sh diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md index f0dd96f..fcc9287 100644 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -1,6 +1,208 @@ # Changelog -#### Version 1.9.0 (trunk) +#### Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/Shopify/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/Shopify/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/Shopify/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/Shopify/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/Shopify/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/Shopify/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/Shopify/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/Shopify/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/Shopify/sarama/pull/991)). + +#### Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. 
+ Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/Shopify/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/Shopify/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/Shopify/sarama/pull/975)). + +#### Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/Shopify/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/Shopify/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/Shopify/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/Shopify/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/Shopify/sarama/issues/885)). + - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/Shopify/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/Shopify/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/Shopify/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/Shopify/sarama/pull/940)). + +#### Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/Shopify/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/Shopify/sarama/pull/837), + [#841](https://github.com/Shopify/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/Shopify/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/Shopify/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/Shopify/sarama/issues/812)). 
+ - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/Shopify/sarama/pull/859)). + +#### Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/Shopify/sarama/pull/701), + [#746](https://github.com/Shopify/sarama/pull/746), + [#766](https://github.com/Shopify/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/Shopify/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/Shopify/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/Shopify/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/Shopify/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/Shopify/sarama/pull/756)). + - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/Shopify/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/Shopify/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/Shopify/sarama/pull/795)). + +#### Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/Shopify/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/Shopify/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/Shopify/sarama/pull/730), + [#733](https://github.com/Shopify/sarama/pull/733), + [#734](https://github.com/Shopify/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/Shopify/sarama/pull/735)). + +#### Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and +[#713](https://github.com/Shopify/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/Shopify/sarama/pull/672), + [#678](https://github.com/Shopify/sarama/pull/678), + [#681](https://github.com/Shopify/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/Shopify/sarama/pull/676)). 
+ - Batch producing support in the SyncProducer + ([#677](https://github.com/Shopify/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/Shopify/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/Shopify/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/Shopify/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/Shopify/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/Shopify/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/Shopify/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/Shopify/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/Shopify/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/Shopify/sarama/pull/709)). + +#### Version 1.9.0 (2016-05-16) New Features: - Add support for custom offset manager retention durations @@ -9,10 +211,20 @@ New Features: implementations ([#570](https://github.com/Shopify/sarama/pull/570)). - Declare support for Golang 1.6 ([#611](https://github.com/Shopify/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/Shopify/sarama/pull/648)). Improvements: - Simplified broker locking scheme slightly ([#604](https://github.com/Shopify/sarama/pull/604)). + - Documentation cleanup + ([#605](https://github.com/Shopify/sarama/pull/605), + [#621](https://github.com/Shopify/sarama/pull/621), + [#654](https://github.com/Shopify/sarama/pull/654)). + +Bug Fixes: + - Fix race condition shutting down the OffsetManager + ([#658](https://github.com/Shopify/sarama/pull/658)). #### Version 1.8.0 (2016-02-01) diff --git a/vendor/github.com/Shopify/sarama/CONTRIBUTING.md b/vendor/github.com/Shopify/sarama/CONTRIBUTING.md deleted file mode 100644 index b0f107c..0000000 --- a/vendor/github.com/Shopify/sarama/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing - -Contributions are always welcome, both reporting issues and submitting pull requests! - -### Reporting issues - -Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth. - -- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please try if the problem persists with the latest version. -- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description. -- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it. - -Also, please include the following information about your environment, so we can help you faster: - -- What version of Kafka are you using? -- What version of Go are you using? -- What are the values of your Producer/Consumer/Client configuration? - - -### Submitting pull requests - -We will gladly accept bug fixes, or additions to this library. 
Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smooth as possible, please consider the following. - -- If you plan to work on something major, please open an issue to discuss the design first. -- Don't break backwards compatibility. If you really have to, open an issue to discuss this first. -- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving. -- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs. -- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`.You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors. -- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems. -- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions. -- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions diff --git a/vendor/github.com/Shopify/sarama/MIT-LICENSE b/vendor/github.com/Shopify/sarama/LICENSE similarity index 100% rename from vendor/github.com/Shopify/sarama/MIT-LICENSE rename to vendor/github.com/Shopify/sarama/LICENSE diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile index 01c6d35..58a39e4 100644 --- a/vendor/github.com/Shopify/sarama/Makefile +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -1,24 +1,29 @@ default: fmt vet errcheck test +# Taken from https://github.com/codecov/example-go#caveat-multiple-files test: - go test -v -timeout 60s -race ./... + echo "" > coverage.txt + for d in `go list ./... | grep -v vendor`; do \ + go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi \ + done vet: go vet ./... errcheck: - @if go version | grep -q go1.5; then errcheck github.com/Shopify/sarama/...; fi + errcheck github.com/Shopify/sarama/... fmt: @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi -install_dependencies: install_errcheck install_go_vet get +install_dependencies: install_errcheck get install_errcheck: - @if go version | grep -q go1.5; then go get github.com/kisielk/errcheck; fi - -install_go_vet: - go get golang.org/x/tools/cmd/vet + go get github.com/kisielk/errcheck get: go get -t diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md index 417c9b5..28431f1 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/Shopify/sarama/README.md @@ -3,6 +3,7 @@ sarama [![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) [![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) +[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). 
@@ -13,13 +14,15 @@ Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apa - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. +You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). + ### Compatibility and API stability Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. This means we currently officially support -Go 1.6, 1.5 and 1.4, and Kafka 0.9.0 and 0.8.2, although older releases are still -likely to work. +Go 1.9 and 1.8, and Kafka 1.0 through 0.10, although older releases are +still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. @@ -27,7 +30,7 @@ A changelog is available [here](CHANGELOG.md). ### Contributing -* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md). +* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). * Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. * The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile index 4586d9a..f4b848a 100644 --- a/vendor/github.com/Shopify/sarama/Vagrantfile +++ b/vendor/github.com/Shopify/sarama/Vagrantfile @@ -4,6 +4,7 @@ # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
VAGRANTFILE_API_VERSION = "2" +# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB MEMORY = 3072 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go new file mode 100644 index 0000000..ab65f01 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -0,0 +1,24 @@ +package sarama + +type ApiVersionsRequest struct { +} + +func (r *ApiVersionsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ApiVersionsRequest) key() int16 { + return 18 +} + +func (r *ApiVersionsRequest) version() int16 { + return 0 +} + +func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go new file mode 100644 index 0000000..23bc326 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -0,0 +1,87 @@ +package sarama + +type ApiVersionsResponseBlock struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { + pe.putInt16(b.ApiKey) + pe.putInt16(b.MinVersion) + pe.putInt16(b.MaxVersion) + return nil +} + +func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { + var err error + + if b.ApiKey, err = pd.getInt16(); err != nil { + return err + } + + if b.MinVersion, err = pd.getInt16(); err != nil { + return err + } + + if b.MaxVersion, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +type ApiVersionsResponse struct { + Err KError + ApiVersions []*ApiVersionsResponseBlock +} + +func (r *ApiVersionsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { + return err + } + for _, apiVersion := range r.ApiVersions { + if err := apiVersion.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) + for i := 0; i < numBlocks; i++ { + block := new(ApiVersionsResponseBlock) + if err := block.decode(pd); err != nil { + return err + } + r.ApiVersions[i] = block + } + + return nil +} + +func (r *ApiVersionsResponse) key() int16 { + return 18 +} + +func (r *ApiVersionsResponse) version() int16 { + return 0 +} + +func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go index e7ae8c2..1eff81c 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -1,6 +1,7 @@ package sarama import ( + "encoding/binary" "fmt" "sync" "time" @@ -17,24 +18,23 @@ import ( // scope. type AsyncProducer interface { - // AsyncClose triggers a shutdown of the producer, flushing any messages it may - // have buffered. The shutdown has completed when both the Errors and Successes - // channels have been closed. 
When calling AsyncClose, you *must* continue to - // read from those channels in order to drain the results of any messages in - // flight. + // AsyncClose triggers a shutdown of the producer. The shutdown has completed + // when both the Errors and Successes channels have been closed. When calling + // AsyncClose, you *must* continue to read from those channels in order to + // drain the results of any messages in flight. AsyncClose() - // Close shuts down the producer and flushes any messages it may have buffered. - // You must call this function before a producer object passes out of scope, as - // it may otherwise leak memory. You must call this before calling Close on the - // underlying client. + // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. Close() error // Input is the input channel for the user to write messages to that they // wish to send. Input() chan<- *ProducerMessage - // Successes is the success output channel back to the user when AckSuccesses is + // Successes is the success output channel back to the user when Return.Successes is // enabled. If Return.Successes is true, you MUST read from this channel or the // Producer will deadlock. It is suggested that you send and read messages // together in a single select statement. @@ -120,6 +120,10 @@ type ProducerMessage struct { // StringEncoder and ByteEncoder. Value Encoder + // The headers are key-value pairs that are transparently passed + // by Kafka between producers and consumers. + Headers []RecordHeader + // This field is used to hold arbitrary data you wish to include so it // will be available when receiving on the Successes and Errors channels. // Sarama completely ignores this field and is only to be used for @@ -135,6 +139,11 @@ type ProducerMessage struct { // Partition is the partition that the message was sent to. This is only // guaranteed to be defined if the message was successfully delivered. Partition int32 + // Timestamp is the timestamp assigned to the message by the broker. This + // is only guaranteed to be defined if the message was successfully + // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at + // least version 0.10.0. + Timestamp time.Time retries int flags flagSet @@ -142,8 +151,16 @@ type ProducerMessage struct { const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. 
-func (m *ProducerMessage) byteSize() int { - size := producerMessageOverhead +func (m *ProducerMessage) byteSize(version int) int { + var size int + if version >= 2 { + size = maximumRecordOverhead + for _, h := range m.Headers { + size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 + } + } else { + size = producerMessageOverhead + } if m.Key != nil { size += m.Key.Length() } @@ -195,7 +212,7 @@ func (p *asyncProducer) Close() error { if p.conf.Producer.Return.Successes { go withRecover(func() { - for _ = range p.successes { + for range p.successes { } }) } @@ -205,6 +222,8 @@ func (p *asyncProducer) Close() error { for event := range p.errors { errors = append(errors, event) } + } else { + <-p.errors } if len(errors) > 0 { @@ -248,7 +267,11 @@ func (p *asyncProducer) dispatcher() { p.inFlight.Add(1) } - if msg.byteSize() > p.conf.Producer.MaxMessageBytes { + version := 1 + if p.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { p.returnError(msg, ErrMessageSizeTooLarge) continue } @@ -722,6 +745,11 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo switch block.Err { // Success case ErrNoError: + if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { + for _, msg := range msgs { + msg.Timestamp = block.Timestamp + } + } for i, msg := range msgs { msg.Offset = block.Offset + int64(i) } diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index c4596f9..e04c7dd 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -2,6 +2,7 @@ package sarama import ( "crypto/tls" + "encoding/binary" "fmt" "io" "net" @@ -9,6 +10,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/rcrowley/go-metrics" ) // Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. @@ -25,15 +28,31 @@ type Broker struct { responses chan responsePromise done chan bool + + incomingByteRate metrics.Meter + requestRate metrics.Meter + requestSize metrics.Histogram + requestLatency metrics.Histogram + outgoingByteRate metrics.Meter + responseRate metrics.Meter + responseSize metrics.Histogram + brokerIncomingByteRate metrics.Meter + brokerRequestRate metrics.Meter + brokerRequestSize metrics.Histogram + brokerRequestLatency metrics.Histogram + brokerOutgoingByteRate metrics.Meter + brokerResponseRate metrics.Meter + brokerResponseSize metrics.Histogram } type responsePromise struct { + requestTime time.Time correlationID int32 packets chan []byte errors chan error } -// NewBroker creates and returns a Broker targetting the given host:port address. +// NewBroker creates and returns a Broker targeting the given host:port address. // This does not attempt to actually connect, you have to call Open() for that. 
func NewBroker(addr string) *Broker { return &Broker{id: -1, addr: addr} @@ -82,6 +101,42 @@ func (b *Broker) Open(conf *Config) error { b.conn = newBufConn(b.conn) b.conf = conf + + // Create or reuse the global metrics shared between brokers + b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) + b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) + b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) + b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry) + b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) + b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) + b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) + // Do not gather metrics for seeded broker (only used during bootstrap) because they share + // the same id (-1) and are already exposed through the global metrics above + if b.id >= 0 { + b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry) + b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry) + b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry) + b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry) + b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry) + b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry) + b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry) + } + + if conf.Net.SASL.Enable { + b.connErr = b.sendAndReceiveSASLPlainAuth() + if b.connErr != nil { + err = b.conn.Close() + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + } + b.done = make(chan bool) b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) @@ -123,6 +178,13 @@ func (b *Broker) Close() error { b.done = nil b.responses = nil + if b.id >= 0 { + b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b)) + b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b)) + b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b)) + b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b)) + } + if err == nil { Logger.Printf("Closed connection to broker %s\n", b.addr) } else { @@ -300,7 +362,40 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups return response, nil } -func (b *Broker) send(rb requestBody, promiseResponse bool) (*responsePromise, error) { +func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { + response := new(ApiVersionsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) { + response := new(CreateTopicsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { + response := new(DeleteTopicsResponse) + + err := 
b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { b.lock.Lock() defer b.lock.Unlock() @@ -311,8 +406,12 @@ func (b *Broker) send(rb requestBody, promiseResponse bool) (*responsePromise, e return nil, ErrNotConnected } + if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { + return nil, ErrUnsupportedVersion + } + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req) + buf, err := encode(req, b.conf.MetricRegistry) if err != nil { return nil, err } @@ -322,23 +421,27 @@ func (b *Broker) send(rb requestBody, promiseResponse bool) (*responsePromise, e return nil, err } - _, err = b.conn.Write(buf) + requestTime := time.Now() + bytes, err := b.conn.Write(buf) + b.updateOutgoingCommunicationMetrics(bytes) if err != nil { return nil, err } b.correlationID++ if !promiseResponse { + // Record request latency without the response + b.updateRequestLatencyMetrics(time.Since(requestTime)) return nil, nil } - promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)} + promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} b.responses <- promise return &promise, nil } -func (b *Broker) sendAndReceive(req requestBody, res decoder) error { +func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { promise, err := b.send(req, res != nil) if err != nil { @@ -351,7 +454,7 @@ func (b *Broker) sendAndReceive(req requestBody, res decoder) error { select { case buf := <-promise.packets: - return decode(buf, res) + return versionedDecode(buf, res, req.version()) case err = <-promise.errors: return err } @@ -420,8 +523,10 @@ func (b *Broker) responseReceiver() { continue } - _, err = io.ReadFull(b.conn, header) + bytesReadHeader, err := io.ReadFull(b.conn, header) + requestLatency := time.Since(response.requestTime) if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err response.errors <- err continue @@ -430,11 +535,13 @@ func (b *Broker) responseReceiver() { decodedHeader := responseHeader{} err = decode(header, &decodedHeader) if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err response.errors <- err continue } if decodedHeader.correlationID != response.correlationID { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) // TODO if decoded ID < cur ID, discard until we catch up // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} @@ -443,7 +550,8 @@ func (b *Broker) responseReceiver() { } buf := make([]byte, decodedHeader.length-4) - _, err = io.ReadFull(b.conn, buf) + bytesReadBody, err := io.ReadFull(b.conn, buf) + b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { dead = err response.errors <- err @@ -454,3 +562,153 @@ func (b *Broker) responseReceiver() { } close(b.done) } + +func (b *Broker) sendAndReceiveSASLPlainHandshake() error { + rb := &SaslHandshakeRequest{"PLAIN"} + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return err + } + + err = 
b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + return err + } + + requestTime := time.Now() + bytes, err := b.conn.Write(buf) + b.updateOutgoingCommunicationMetrics(bytes) + if err != nil { + Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) + return err + } + b.correlationID++ + //wait for the response + header := make([]byte, 8) // response header + _, err = io.ReadFull(b.conn, header) + if err != nil { + Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) + return err + } + length := binary.BigEndian.Uint32(header[:4]) + payload := make([]byte, length-4) + n, err := io.ReadFull(b.conn, payload) + if err != nil { + Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) + return err + } + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) + res := &SaslHandshakeResponse{} + err = versionedDecode(payload, res, 0) + if err != nil { + Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) + return err + } + if res.Err != ErrNoError { + Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) + return res.Err + } + Logger.Print("Successful SASL handshake") + return nil +} + +// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) +// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 +// +// In SASL Plain, Kafka expects the auth header to be in the following format +// Message format (from https://tools.ietf.org/html/rfc4616): +// +// message = [authzid] UTF8NUL authcid UTF8NUL passwd +// authcid = 1*SAFE ; MUST accept up to 255 octets +// authzid = 1*SAFE ; MUST accept up to 255 octets +// passwd = 1*SAFE ; MUST accept up to 255 octets +// UTF8NUL = %x00 ; UTF-8 encoded NUL character +// +// SAFE = UTF1 / UTF2 / UTF3 / UTF4 +// ;; any UTF-8 encoded Unicode character except NUL +// +// When credentials are valid, Kafka returns a 4 byte array of null characters. +// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way +// of responding to bad credentials but thats how its being done today. +func (b *Broker) sendAndReceiveSASLPlainAuth() error { + if b.conf.Net.SASL.Handshake { + handshakeErr := b.sendAndReceiveSASLPlainHandshake() + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + } + length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) + authBytes := make([]byte, length+4) //4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) + + err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error()) + return err + } + + requestTime := time.Now() + bytesWritten, err := b.conn.Write(authBytes) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + header := make([]byte, 4) + n, err := io.ReadFull(b.conn, header) + b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) + // If the credentials are valid, we would get a 4 byte response filled with null characters. 
+ // Otherwise, the broker closes the connection and we get an EOF + if err != nil { + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) + return nil +} + +func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { + b.updateRequestLatencyMetrics(requestLatency) + b.responseRate.Mark(1) + if b.brokerResponseRate != nil { + b.brokerResponseRate.Mark(1) + } + responseSize := int64(bytes) + b.incomingByteRate.Mark(responseSize) + if b.brokerIncomingByteRate != nil { + b.brokerIncomingByteRate.Mark(responseSize) + } + b.responseSize.Update(responseSize) + if b.brokerResponseSize != nil { + b.brokerResponseSize.Update(responseSize) + } +} + +func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { + requestLatencyInMs := int64(requestLatency / time.Millisecond) + b.requestLatency.Update(requestLatencyInMs) + if b.brokerRequestLatency != nil { + b.brokerRequestLatency.Update(requestLatencyInMs) + } +} + +func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { + b.requestRate.Mark(1) + if b.brokerRequestRate != nil { + b.brokerRequestRate.Mark(1) + } + requestSize := int64(bytes) + b.outgoingByteRate.Mark(requestSize) + if b.brokerOutgoingByteRate != nil { + b.brokerOutgoingByteRate.Mark(requestSize) + } + b.requestSize.Update(requestSize) + if b.brokerRequestSize != nil { + b.brokerRequestSize.Update(requestSize) + } +} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index 54a8897..3dbfc4b 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/Shopify/sarama/client.go @@ -17,6 +17,9 @@ type Client interface { // altered after it has been created. Config() *Config + // Brokers returns the current set of active brokers as retrieved from cluster metadata. + Brokers() []*Broker + // Topics returns the set of available topics as retrieved from cluster metadata. Topics() ([]string, error) @@ -35,15 +38,20 @@ type Client interface { // Replicas returns the set of all replica IDs for the given partition. Replicas(topic string, partitionID int32) ([]int32, error) + // InSyncReplicas returns the set of all in-sync replica IDs for the given + // partition. In-sync replicas are replicas which are fully caught up with + // the partition leader. + InSyncReplicas(topic string, partitionID int32) ([]int32, error) + // RefreshMetadata takes a list of topics and queries the cluster to refresh the // available metadata for those topics. If no topics are provided, it will refresh // metadata for all topics. RefreshMetadata(topics ...string) error // GetOffset queries the cluster to get the most recent available offset at the - // given time on the topic/partition combination. Time should be OffsetOldest for - // the earliest available offset, OffsetNewest for the offset of the message that - // will be produced next, or a time. + // given time (in milliseconds) on the topic/partition combination. + // Time should be OffsetOldest for the earliest available offset, + // OffsetNewest for the offset of the message that will be produced next, or a time. GetOffset(topic string, partitionID int32, time int64) (int64, error) // Coordinator returns the coordinating broker for a consumer group. 
It will @@ -133,18 +141,20 @@ func NewClient(addrs []string, conf *Config) (Client, error) { client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) } - // do an initial fetch of all cluster metadata by specifing an empty list of topics - err := client.RefreshMetadata() - switch err { - case nil: - break - case ErrLeaderNotAvailable, ErrReplicaNotAvailable: - // indicates that maybe part of the cluster is down, but is not fatal to creating the client - Logger.Println(err) - default: - close(client.closed) // we haven't started the background updater yet, so we have to do this manually - _ = client.Close() - return nil, err + if conf.Metadata.Full { + // do an initial fetch of all cluster metadata by specifying an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } } go withRecover(client.backgroundMetadataUpdater) @@ -157,6 +167,16 @@ func (client *client) Config() *Config { return client.conf } +func (client *client) Brokers() []*Broker { + client.lock.RLock() + defer client.lock.RUnlock() + brokers := make([]*Broker, 0) + for _, broker := range client.brokers { + brokers = append(brokers, broker) + } + return brokers +} + func (client *client) Close() error { if client.Closed() { // Chances are this is being called from a defer() and the error will go unobserved @@ -277,9 +297,34 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) } if metadata.Err == ErrReplicaNotAvailable { - return nil, metadata.Err + return dupInt32Slice(metadata.Replicas), metadata.Err } - return dupeAndSort(metadata.Replicas), nil + return dupInt32Slice(metadata.Replicas), nil +} + +func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.Isr), metadata.Err + } + return dupInt32Slice(metadata.Isr), nil } func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { @@ -290,7 +335,7 @@ func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { leader, err := client.cachedLeader(topic, partitionID) if leader == nil { - err := client.RefreshMetadata(topic) + err = client.RefreshMetadata(topic) if err != nil { return nil, err } @@ -521,6 +566,9 @@ func (client *client) getOffset(topic string, partitionID int32, time int64) (in } request := &OffsetRequest{} + if client.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 1 + } request.AddBlock(topic, partitionID, time, 1) response, err := broker.GetAvailableOffsets(request) @@ -559,7 +607,20 @@ func (client *client) backgroundMetadataUpdater() { for { select { case <-ticker.C: - if err := client.RefreshMetadata(); err != nil { + topics := []string{} + if !client.conf.Metadata.Full { + if specificTopics, err 
:= client.Topics(); err != nil { + Logger.Println("Client background metadata topic load:", err) + break + } else if len(specificTopics) == 0 { + Logger.Println("Client background metadata update: no specific topics to update") + break + } else { + topics = specificTopics + } + } + + if err := client.RefreshMetadata(topics...); err != nil { Logger.Println("Client background metadata update:", err) } case <-client.closer: @@ -589,12 +650,12 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) switch err.(type) { case nil: // valid response, use it - if shouldRetry, err := client.updateMetadata(response); shouldRetry { + shouldRetry, err := client.updateMetadata(response) + if shouldRetry { Logger.Println("client/metadata found some partitions to be leaderless") return retry(err) // note: err can be nil - } else { - return err } + return err case PacketEncodingError: // didn't even send, return the error @@ -685,7 +746,7 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin } for broker := client.any(); broker != nil; broker = client.any() { - Logger.Printf("client/coordinator requesting coordinator for consumergoup %s from %s\n", consumerGroup, broker.Addr()) + Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) request := new(ConsumerMetadataRequest) request.ConsumerGroup = consumerGroup @@ -707,7 +768,7 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin switch response.Err { case ErrNoError: - Logger.Printf("client/coordinator coordinator for consumergoup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) + Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) return response, nil case ErrConsumerCoordinatorNotAvailable: diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index 188b4b2..e4ff680 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -4,9 +4,13 @@ import ( "crypto/tls" "regexp" "time" + + "github.com/rcrowley/go-metrics" ) -var validID *regexp.Regexp = regexp.MustCompile(`\A[A-Za-z0-9._-]*\z`) +const defaultClientID = "sarama" + +var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) // Config is used to pass multiple configuration options to Sarama's constructors. type Config struct { @@ -33,6 +37,21 @@ type Config struct { Config *tls.Config } + // SASL based authentication with broker. While there are multiple SASL authentication methods + // the current implementation is limited to plaintext (SASL/PLAIN) authentication + SASL struct { + // Whether or not to use SASL authentication when connecting to the broker + // (defaults to false). + Enable bool + // Whether or not to send the Kafka SASL handshake first if enabled + // (defaults to true). You should only set this to false if you're using + // a non-Kafka SASL proxy. + Handshake bool + //username and password for SASL/PLAIN authentication + User string + Password string + } + // KeepAlive specifies the keep-alive period for an active network connection. // If zero, keep-alives are disabled. (default is 0: disabled). KeepAlive time.Duration @@ -53,6 +72,12 @@ type Config struct { // Defaults to 10 minutes. Set to 0 to disable. Similar to // `topic.metadata.refresh.interval.ms` in the JVM version. 
RefreshFrequency time.Duration + + // Whether to maintain a full set of metadata for all topics, or just + // the minimal set that has been necessary so far. The full set is simpler + // and usually more convenient, but can take up a substantial amount of + // memory if you have many topics and partitions. Defaults to true. + Full bool } // Producer is the namespace for configuration related to producing messages, @@ -80,7 +105,10 @@ type Config struct { Partitioner PartitionerConstructor // Return specifies what channels will be populated. If they are set to true, - // you must read from the respective channels to prevent deadlock. + // you must read from the respective channels to prevent deadlock. If, + // however, this config is used to create a `SyncProducer`, both must be set + // to true and you shall not read from the channels since the producer does + // this internally. Return struct { // If enabled, successfully delivered messages will be returned on the // Successes channel (default disabled). @@ -168,17 +196,29 @@ type Config struct { // Equivalent to the JVM's `fetch.wait.max.ms`. MaxWaitTime time.Duration - // The maximum amount of time the consumer expects a message takes to process - // for the user. If writing to the Messages channel takes longer than this, - // that partition will stop fetching more messages until it can proceed again. + // The maximum amount of time the consumer expects a message takes to + // process for the user. If writing to the Messages channel takes longer + // than this, that partition will stop fetching more messages until it + // can proceed again. // Note that, since the Messages channel is buffered, the actual grace time is // (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms. + // If a message is not written to the Messages channel between two ticks + // of the expiryTicker then a timeout is detected. + // Using a ticker instead of a timer to detect timeouts should typically + // result in many fewer calls to Timer functions which may result in a + // significant performance improvement if many messages are being sent + // and timeouts are infrequent. + // The disadvantage of using a ticker instead of a timer is that + // timeouts will be less accurate. That is, the effective timeout could + // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For + // example, if `MaxProcessingTime` is 100ms then a delay of 180ms + // between two messages being sent may not be recognized as a timeout. MaxProcessingTime time.Duration // Return specifies what channels will be populated. If they are set to true, // you must read from them to prevent deadlock. Return struct { - // If enabled, any errors that occured while consuming are returned on + // If enabled, any errors that occurred while consuming are returned on // the Errors channel (default disabled). Errors bool } @@ -197,7 +237,8 @@ type Config struct { // The retention duration for committed offsets. If zero, disabled // (in which case the `offsets.retention.minutes` option on the // broker will be used). Kafka only supports precision up to - // milliseconds; nanoseconds will be truncated. + // milliseconds; nanoseconds will be truncated. Requires Kafka + // broker version 0.9.0 or later. // (default is 0: disabled). Retention time.Duration } @@ -212,6 +253,19 @@ type Config struct { // in the background while user code is working, greatly improving throughput. // Defaults to 256. ChannelBufferSize int + // The version of Kafka that Sarama will assume it is running against. 
+ // Defaults to the oldest supported stable version. Since Kafka provides + // backwards-compatibility, setting it to a version older than you have + // will not break anything, although it may prevent you from using the + // latest features. Setting it to a version greater than you are actually + // running may lead to random breakage. + Version KafkaVersion + // The registry to define metrics into. + // Defaults to a local registry. + // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true" + // prior to starting Sarama. + // See Examples on how to use the metrics registry + MetricRegistry metrics.Registry } // NewConfig returns a new configuration instance with sane defaults. @@ -222,10 +276,12 @@ func NewConfig() *Config { c.Net.DialTimeout = 30 * time.Second c.Net.ReadTimeout = 30 * time.Second c.Net.WriteTimeout = 30 * time.Second + c.Net.SASL.Handshake = true c.Metadata.Retry.Max = 3 c.Metadata.Retry.Backoff = 250 * time.Millisecond c.Metadata.RefreshFrequency = 10 * time.Minute + c.Metadata.Full = true c.Producer.MaxMessageBytes = 1000000 c.Producer.RequiredAcks = WaitForLocal @@ -244,7 +300,10 @@ func NewConfig() *Config { c.Consumer.Offsets.CommitInterval = 1 * time.Second c.Consumer.Offsets.Initial = OffsetNewest + c.ClientID = defaultClientID c.ChannelBufferSize = 256 + c.Version = minVersion + c.MetricRegistry = metrics.NewRegistry() return c } @@ -256,14 +315,25 @@ func (c *Config) Validate() error { if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") } + if c.Net.SASL.Enable == false { + if c.Net.SASL.User != "" { + Logger.Println("Net.SASL is disabled but a non-empty username was provided.") + } + if c.Net.SASL.Password != "" { + Logger.Println("Net.SASL is disabled but a non-empty password was provided.") + } + } if c.Producer.RequiredAcks > 1 { Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") } if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { - Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.") + Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.") } if c.Producer.Flush.Bytes >= int(MaxRequestSize) { - Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.") + Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.") + } + if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 { + Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.") } if c.Producer.Timeout%time.Millisecond != 0 { Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") @@ -277,7 +347,7 @@ func (c *Config) Validate() error { if c.Consumer.Offsets.Retention%time.Millisecond != 0 { Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") } - if c.ClientID == "sarama" { + if c.ClientID == defaultClientID { Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") } @@ -293,6 +363,10 @@ func (c *Config) Validate() error { return ConfigurationError("Net.WriteTimeout must be > 0") case c.Net.KeepAlive < 0: return ConfigurationError("Net.KeepAlive must be >= 0") + case c.Net.SASL.Enable == true && c.Net.SASL.User 
== "": + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") } // validate the Metadata values @@ -331,6 +405,10 @@ func (c *Config) Validate() error { return ConfigurationError("Producer.Retry.Backoff must be >= 0") } + if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { + return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") + } + // validate the Consumer values switch { case c.Consumer.Fetch.Min <= 0: diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index b482371..2d29e4b 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -10,10 +10,13 @@ import ( // ConsumerMessage encapsulates a Kafka message returned by the consumer. type ConsumerMessage struct { - Key, Value []byte - Topic string - Partition int32 - Offset int64 + Key, Value []byte + Topic string + Partition int32 + Offset int64 + Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp + BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp + Headers []*RecordHeader // only set if kafka is version 0.11+ } // ConsumerError is what is provided to the user when an error occurs. @@ -62,6 +65,10 @@ type Consumer interface { // or OffsetOldest ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) + // HighWaterMarks returns the current high water marks for each topic and partition. + // Consistency between partitions is not guaranteed since high water marks are updated separately. + HighWaterMarks() map[string]map[int32]int64 + // Close shuts down the consumer. It must be called after all child // PartitionConsumers have already been closed. Close() error @@ -162,6 +169,22 @@ func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) return child, nil } +func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { + c.lock.Lock() + defer c.lock.Unlock() + + hwms := make(map[string]map[int32]int64) + for topic, p := range c.children { + hwm := make(map[int32]int64, len(p)) + for partition, pc := range p { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + func (c *consumer) addChild(child *partitionConsumer) error { c.lock.Lock() defer c.lock.Unlock() @@ -225,9 +248,9 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { // PartitionConsumer -// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close() -// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically -// when it passes out of scope. +// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or +// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out +// of scope. // // The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range // loop. 
The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported @@ -236,26 +259,32 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { // By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set // your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. +// +// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of +// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process +// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call +// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will +// also drain the Messages channel, harvest all errors & return them once cleanup has completed. type PartitionConsumer interface { - // AsyncClose initiates a shutdown of the PartitionConsumer. This method will - // return immediately, after which you should wait until the 'messages' and - // 'errors' channel are drained. It is required to call this function, or - // Close before a consumer object passes out of scope, as it will otherwise - // leak memory. You must call this before calling Close on the underlying client. + // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you + // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this + // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call + // this before calling Close on the underlying client. AsyncClose() - // Close stops the PartitionConsumer from fetching messages. It is required to - // call this function (or AsyncClose) before a consumer object passes out of - // scope, as it will otherwise leak memory. You must call this before calling - // Close on the underlying client. + // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain + // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service + // the Messages channel when this function is called, you will be competing with Close for messages; consider + // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes + // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client. Close() error // Messages returns the read channel for the messages that are returned by // the broker. Messages() <-chan *ConsumerMessage - // Errors returns a read channel of errors that occured during consuming, if + // Errors returns a read channel of errors that occurred during consuming, if // enabled. By default, errors are logged and not returned over this channel. // If you want to implement any custom error handling, set your config's // Consumer.Return.Errors setting to true, and read from this channel. 
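The rewritten PartitionConsumer comments above describe a specific consumption pattern. A minimal sketch of that pattern follows, assuming Consumer.Return.Errors was enabled on the client's config and that AsyncClose is invoked from somewhere else (for example a shutdown handler); the package and function names here are illustrative and not part of the vendored code:

package kafkaexample // hypothetical package, not part of the diff

import (
	"log"

	"github.com/Shopify/sarama"
)

// ServicePartition drains a PartitionConsumer the way the updated doc comments
// describe: range over Messages until the tear-down started by AsyncClose
// closes the channel, and service Errors from a separate goroutine.
func ServicePartition(pc sarama.PartitionConsumer) {
	go func() {
		// Only populated when Consumer.Return.Errors is true; otherwise errors
		// are logged to sarama.Logger and this loop ends when Errors is closed.
		for err := range pc.Errors() {
			log.Println("consume error:", err)
		}
	}()

	// The range terminates once the teardown initiated by AsyncClose (called
	// elsewhere) closes the Messages channel.
	for msg := range pc.Messages() {
		log.Printf("topic=%s partition=%d offset=%d", msg.Topic, msg.Partition, msg.Offset)
	}
}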
@@ -268,10 +297,11 @@ type PartitionConsumer interface { } type partitionConsumer struct { - consumer *consumer - conf *Config - topic string - partition int32 + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + consumer *consumer + conf *Config + topic string + partition int32 broker *brokerConsumer messages chan *ConsumerMessage @@ -281,9 +311,8 @@ type partitionConsumer struct { trigger, dying chan none responseResult error - fetchSize int32 - offset int64 - highWaterMarkOffset int64 + fetchSize int32 + offset int64 } var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing @@ -303,7 +332,7 @@ func (child *partitionConsumer) sendError(err error) { } func (child *partitionConsumer) dispatcher() { - for _ = range child.trigger { + for range child.trigger { select { case <-child.dying: close(child.trigger) @@ -390,7 +419,7 @@ func (child *partitionConsumer) Close() error { child.AsyncClose() go withRecover(func() { - for _ = range child.messages { + for range child.messages { // drain } }) @@ -412,25 +441,37 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 { func (child *partitionConsumer) responseFeeder() { var msgs []*ConsumerMessage + msgSent := false feederLoop: for response := range child.feeder { msgs, child.responseResult = child.parseResponse(response) + expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) for i, msg := range msgs { + messageSelect: select { case child.messages <- msg: - case <-time.After(child.conf.Consumer.MaxProcessingTime): - child.responseResult = errTimedOut - child.broker.acks.Done() - for _, msg = range msgs[i:] { - child.messages <- msg + msgSent = true + case <-expiryTicker.C: + if !msgSent { + child.responseResult = errTimedOut + child.broker.acks.Done() + for _, msg = range msgs[i:] { + child.messages <- msg + } + child.broker.input <- child + continue feederLoop + } else { + // current message has not been sent, return to select + // statement + msgSent = false + goto messageSelect } - child.broker.input <- child - continue feederLoop } } + expiryTicker.Stop() child.broker.acks.Done() } @@ -438,6 +479,87 @@ feederLoop: close(child.errors) } +func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + var incomplete bool + prelude := true + + for _, msgBlock := range msgSet.Messages { + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + } + if prelude && offset < child.offset { + continue + } + prelude = false + + if offset >= child.offset { + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + Timestamp: msg.Msg.Timestamp, + BlockTimestamp: msgBlock.Msg.Timestamp, + }) + child.offset = offset + 1 + } else { + incomplete = true + } + } + } + + if incomplete || len(messages) == 0 { + return nil, ErrIncompleteResponse + } + return messages, nil +} + +func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + var incomplete bool + prelude := true + originalOffset := child.offset + + for _, rec := range batch.Records { + offset := batch.FirstOffset + rec.OffsetDelta + if prelude && offset < child.offset { + continue + } + 
prelude = false + + if offset >= child.offset { + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: rec.Key, + Value: rec.Value, + Offset: offset, + Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta), + Headers: rec.Headers, + }) + child.offset = offset + 1 + } else { + incomplete = true + } + } + + if incomplete { + return nil, ErrIncompleteResponse + } + + child.offset = batch.FirstOffset + int64(batch.LastOffsetDelta) + 1 + if child.offset <= originalOffset { + return nil, ErrConsumerOffsetNotAdvanced + } + + return messages, nil +} + func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { block := response.GetBlock(child.topic, child.partition) if block == nil { @@ -448,10 +570,18 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return nil, block.Err } - if len(block.MsgSet.Messages) == 0 { + nRecs, err := block.Records.numRecords() + if err != nil { + return nil, err + } + if nRecs == 0 { + partialTrailingMessage, err := block.Records.isPartial() + if err != nil { + return nil, err + } // We got no messages. If we got a trailing one then we need to ask for more data. // Otherwise we just poll again and wait for one to be produced... - if block.MsgSet.PartialTrailingMessage { + if partialTrailingMessage { if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { // we can't ask for more data, we've hit the configured limit child.sendError(ErrMessageTooLarge) @@ -471,37 +601,14 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu child.fetchSize = child.conf.Consumer.Fetch.Default atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) - incomplete := false - prelude := true - var messages []*ConsumerMessage - for _, msgBlock := range block.MsgSet.Messages { - - for _, msg := range msgBlock.Messages() { - if prelude && msg.Offset < child.offset { - continue - } - prelude = false - - if msg.Offset >= child.offset { - messages = append(messages, &ConsumerMessage{ - Topic: child.topic, - Partition: child.partition, - Key: msg.Msg.Key, - Value: msg.Msg.Value, - Offset: msg.Offset, - }) - child.offset = msg.Offset + 1 - } else { - incomplete = true - } - } - + if control, err := block.Records.isControl(); err != nil || control { + return nil, err } - if incomplete || len(messages) == 0 { - return nil, ErrIncompleteResponse + if block.Records.recordsType == legacyRecords { + return child.parseMessages(block.Records.msgSet) } - return messages, nil + return child.parseRecords(block.Records.recordBatch) } // brokerConsumer @@ -538,7 +645,7 @@ func (bc *brokerConsumer) subscriptionManager() { var buffer []*partitionConsumer // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer - // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks + // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, // so the main goroutine can block waiting for work if it has none. 
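The ticker-based timeout added to responseFeeder above (and documented under Consumer.MaxProcessingTime earlier in this diff) trades accuracy for fewer timer allocations: a send is only treated as timed out if no message was delivered between two consecutive ticks, so the effective timeout lands somewhere between MaxProcessingTime and twice that. A stripped-down sketch of the same idea outside Sarama, with made-up names and without the re-queueing the real code performs:

package tickertimeout // illustrative only, not part of the vendored code

import "time"

// deliver pushes msgs to out and reports a timeout only if no message was
// delivered between two consecutive ticks. Compared with a fresh time.After
// per message, a single ticker per batch allocates far fewer timers, at the
// cost of an effective timeout between maxProcessing and 2*maxProcessing.
func deliver(out chan<- int, msgs []int, maxProcessing time.Duration) (timedOut bool) {
	ticker := time.NewTicker(maxProcessing)
	defer ticker.Stop()

	sentSinceTick := false
	for _, m := range msgs {
	send:
		for {
			select {
			case out <- m:
				sentSinceTick = true
				break send
			case <-ticker.C:
				if !sentSinceTick {
					return true // no progress for a full tick: give up
				}
				// Progress was made since the last tick; give the receiver
				// one more full tick before declaring a timeout.
				sentSinceTick = false
			}
		}
	}
	return false
}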
@@ -669,8 +776,12 @@ func (bc *brokerConsumer) abort(err error) { child.trigger <- none{} } - for newSubscription := range bc.newSubscriptions { - for _, child := range newSubscription { + for newSubscriptions := range bc.newSubscriptions { + if len(newSubscriptions) == 0 { + <-bc.wait + continue + } + for _, child := range newSubscriptions { child.sendError(err) child.trigger <- none{} } @@ -682,6 +793,17 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { MinBytes: bc.consumer.conf.Consumer.Fetch.Min, MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), } + if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 2 + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 3 + request.MaxBytes = MaxResponseSize + } + if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + request.Isolation = ReadUncommitted // We don't support yet transactions. + } for child := range bc.subscriptions { request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go index 9b8fcd7..483be33 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -8,7 +8,7 @@ func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { return pe.putString(r.ConsumerGroup) } -func (r *ConsumerMetadataRequest) decode(pd packetDecoder) (err error) { +func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { r.ConsumerGroup, err = pd.getString() return err } @@ -20,3 +20,7 @@ func (r *ConsumerMetadataRequest) key() int16 { func (r *ConsumerMetadataRequest) version() int16 { return 0 } + +func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go index d6b5614..6b9632b 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -13,7 +13,7 @@ type ConsumerMetadataResponse struct { CoordinatorPort int32 // deprecated: use Coordinator.Addr() } -func (r *ConsumerMetadataResponse) decode(pd packetDecoder) (err error) { +func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err @@ -71,3 +71,15 @@ func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { pe.putInt32(r.CoordinatorPort) return nil } + +func (r *ConsumerMetadataResponse) key() int16 { + return 10 +} + +func (r *ConsumerMetadataResponse) version() int16 { + return 0 +} + +func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go index 5c28607..1f14443 100644 --- a/vendor/github.com/Shopify/sarama/crc32_field.go +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -2,13 +2,23 @@ package sarama import ( "encoding/binary" + "fmt" + "hash/crc32" +) + +type crcPolynomial int8 - "github.com/klauspost/crc32" +const ( + crcIEEE crcPolynomial = iota + crcCastagnoli ) +var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) + // crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. 
type crc32Field struct { startOffset int + polynomial crcPolynomial } func (c *crc32Field) saveOffset(in int) { @@ -19,18 +29,41 @@ func (c *crc32Field) reserveLength() int { return 4 } +func newCRC32Field(polynomial crcPolynomial) *crc32Field { + return &crc32Field{polynomial: polynomial} +} + func (c *crc32Field) run(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } binary.BigEndian.PutUint32(buf[c.startOffset:], crc) return nil } func (c *crc32Field) check(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } - if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { - return PacketDecodingError{"CRC didn't match"} + expected := binary.BigEndian.Uint32(buf[c.startOffset:]) + if crc != expected { + return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)} } return nil } +func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { + var tab *crc32.Table + switch c.polynomial { + case crcIEEE: + tab = crc32.IEEETable + case crcCastagnoli: + tab = castagnoliTable + default: + return 0, PacketDecodingError{"invalid CRC type"} + } + return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go new file mode 100644 index 0000000..af321e9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go @@ -0,0 +1,121 @@ +package sarama + +import "time" + +type CreatePartitionsRequest struct { + TopicPartitions map[string]*TopicPartition + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { + return err + } + + for topic, partition := range c.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := partition.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + pe.putBool(c.ValidateOnly) + + return nil +} + +func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + c.TopicPartitions = make(map[string]*TopicPartition, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitions[topic] = new(TopicPartition) + if err := c.TopicPartitions[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if c.ValidateOnly, err = pd.getBool(); err != nil { + return err + } + + return nil +} + +func (r *CreatePartitionsRequest) key() int16 { + return 37 +} + +func (r *CreatePartitionsRequest) version() int16 { + return 0 +} + +func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartition struct { + Count int32 + Assignment [][]int32 +} + +func (t *TopicPartition) encode(pe packetEncoder) error { + pe.putInt32(t.Count) + + if len(t.Assignment) == 0 { + pe.putInt32(-1) + return nil + } + + if err := pe.putArrayLength(len(t.Assignment)); err != nil { + return err + } + + for _, assign := range t.Assignment { + if err := 
pe.putInt32Array(assign); err != nil { + return err + } + } + + return nil +} + +func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) { + if t.Count, err = pd.getInt32(); err != nil { + return err + } + + n, err := pd.getInt32() + if err != nil { + return err + } + if n <= 0 { + return nil + } + t.Assignment = make([][]int32, n) + + for i := 0; i < int(n); i++ { + if t.Assignment[i], err = pd.getInt32Array(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go new file mode 100644 index 0000000..abd621c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -0,0 +1,94 @@ +package sarama + +import "time" + +type CreatePartitionsResponse struct { + ThrottleTime time.Duration + TopicPartitionErrors map[string]*TopicPartitionError +} + +func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { + return err + } + + for topic, partitionError := range c.TopicPartitionErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := partitionError.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitionErrors[topic] = new(TopicPartitionError) + if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (r *CreatePartitionsResponse) key() int16 { + return 37 +} + +func (r *CreatePartitionsResponse) version() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartitionError struct { + Err KError + ErrMsg *string +} + +func (t *TopicPartitionError) encode(pe packetEncoder) error { + pe.putInt16(int16(t.Err)) + + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + + return nil +} + +func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kerr) + + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go new file mode 100644 index 0000000..709c0a4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -0,0 +1,174 @@ +package sarama + +import ( + "time" +) + +type CreateTopicsRequest struct { + Version int16 + + TopicDetails map[string]*TopicDetail + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreateTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { + return err + } + for topic, detail := range c.TopicDetails { + if err := pe.putString(topic); err != nil { + return err + } + if err := detail.encode(pe); err != nil { + return err + } + } + + 
pe.putInt32(int32(c.Timeout / time.Millisecond)) + + if c.Version >= 1 { + pe.putBool(c.ValidateOnly) + } + + return nil +} + +func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicDetails = make(map[string]*TopicDetail, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicDetails[topic] = new(TopicDetail) + if err = c.TopicDetails[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if version >= 1 { + c.ValidateOnly, err = pd.getBool() + if err != nil { + return err + } + + c.Version = version + } + + return nil +} + +func (c *CreateTopicsRequest) key() int16 { + return 19 +} + +func (c *CreateTopicsRequest) version() int16 { + return c.Version +} + +func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicDetail struct { + NumPartitions int32 + ReplicationFactor int16 + ReplicaAssignment map[int32][]int32 + ConfigEntries map[string]*string +} + +func (t *TopicDetail) encode(pe packetEncoder) error { + pe.putInt32(t.NumPartitions) + pe.putInt16(t.ReplicationFactor) + + if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { + return err + } + for partition, assignment := range t.ReplicaAssignment { + pe.putInt32(partition) + if err := pe.putInt32Array(assignment); err != nil { + return err + } + } + + if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range t.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { + if t.NumPartitions, err = pd.getInt32(); err != nil { + return err + } + if t.ReplicationFactor, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ReplicaAssignment = make(map[int32][]int32, n) + for i := 0; i < n; i++ { + replica, err := pd.getInt32() + if err != nil { + return err + } + if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { + return err + } + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go new file mode 100644 index 0000000..66207e0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -0,0 +1,112 @@ +package sarama + +import "time" + +type CreateTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrors map[string]*TopicError +} + +func (c *CreateTopicsResponse) encode(pe packetEncoder) error { + if c.Version >= 2 { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { + return err + } + for topic, 
topicError := range c.TopicErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := topicError.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + + if version >= 2 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicErrors = make(map[string]*TopicError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicErrors[topic] = new(TopicError) + if err := c.TopicErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) key() int16 { + return 19 +} + +func (c *CreateTopicsResponse) version() int16 { + return c.Version +} + +func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicError struct { + Err KError + ErrMsg *string +} + +func (t *TopicError) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(t.Err)) + + if version >= 1 { + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + } + + return nil +} + +func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + if version >= 1 { + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go new file mode 100644 index 0000000..ed9089e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -0,0 +1,41 @@ +package sarama + +import "time" + +type DeleteTopicsRequest struct { + Topics []string + Timeout time.Duration +} + +func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putStringArray(d.Topics); err != nil { + return err + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + if d.Topics, err = pd.getStringArray(); err != nil { + return err + } + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + return nil +} + +func (d *DeleteTopicsRequest) key() int16 { + return 20 +} + +func (d *DeleteTopicsRequest) version() int16 { + return 0 +} + +func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { + return V0_10_1_0 +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go new file mode 100644 index 0000000..3422546 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -0,0 +1,78 @@ +package sarama + +import "time" + +type DeleteTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrorCodes map[string]KError +} + +func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { + if d.Version >= 1 { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { + return err + } + for topic, errorCode := range d.TopicErrorCodes { + if err := 
pe.putString(topic); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + d.Version = version + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.TopicErrorCodes = make(map[string]KError, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + d.TopicErrorCodes[topic] = KError(errorCode) + } + + return nil +} + +func (d *DeleteTopicsResponse) key() int16 { + return 20 +} + +func (d *DeleteTopicsResponse) version() int16 { + return d.Version +} + +func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go index c9426a6..1fb3567 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -8,7 +8,7 @@ func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { return pe.putStringArray(r.Groups) } -func (r *DescribeGroupsRequest) decode(pd packetDecoder) (err error) { +func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { r.Groups, err = pd.getStringArray() return } @@ -21,6 +21,10 @@ func (r *DescribeGroupsRequest) version() int16 { return 0 } +func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + func (r *DescribeGroupsRequest) AddGroup(group string) { r.Groups = append(r.Groups, group) } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go index b4b32dd..542b3a9 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -18,7 +18,7 @@ func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { return nil } -func (r *DescribeGroupsResponse) decode(pd packetDecoder) (err error) { +func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { n, err := pd.getArrayLength() if err != nil { return err @@ -35,6 +35,18 @@ func (r *DescribeGroupsResponse) decode(pd packetDecoder) (err error) { return nil } +func (r *DescribeGroupsResponse) key() int16 { + return 15 +} + +func (r *DescribeGroupsResponse) version() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + type GroupDescription struct { Err KError GroupId string @@ -77,12 +89,13 @@ func (gd *GroupDescription) encode(pe packetEncoder) error { } func (gd *GroupDescription) decode(pd packetDecoder) (err error) { - if kerr, err := pd.getInt16(); err != nil { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - gd.Err = KError(kerr) } + gd.Err = KError(kerr) + if gd.GroupId, err = pd.getString(); err != nil { return } @@ -160,3 +173,15 @@ func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { return nil } + +func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := 
decode(gmd.MemberAssignment, assignment) + return assignment, err +} + +func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + metadata := new(ConsumerGroupMemberMetadata) + err := decode(gmd.MemberMetadata, metadata) + return metadata, err +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml new file mode 100644 index 0000000..294fcdb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -0,0 +1,10 @@ +name: sarama + +up: + - go: + version: '1.9' + +commands: + test: + run: make test + desc: 'run unit tests' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go index b91efaa..7ce3bc0 100644 --- a/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -1,6 +1,10 @@ package sarama -import "fmt" +import ( + "fmt" + + "github.com/rcrowley/go-metrics" +) // Encoder is the interface that wraps the basic Encode method. // Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. @@ -8,8 +12,8 @@ type encoder interface { encode(pe packetEncoder) error } -// Encode takes an Encoder and turns it into bytes. -func encode(e encoder) ([]byte, error) { +// Encode takes an Encoder and turns it into bytes while potentially recording metrics. +func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { if e == nil { return nil, nil } @@ -27,6 +31,7 @@ func encode(e encoder) ([]byte, error) { } realEnc.raw = make([]byte, prepEnc.length) + realEnc.registry = metricRegistry err = e.encode(&realEnc) if err != nil { return nil, err @@ -41,6 +46,10 @@ type decoder interface { decode(pd packetDecoder) error } +type versionedDecoder interface { + decode(pd packetDecoder, version int16) error +} + // Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, // interpreted using Kafka's encoding rules. func decode(buf []byte, in decoder) error { @@ -60,3 +69,21 @@ func decode(buf []byte, in decoder) error { return nil } + +func versionedDecode(buf []byte, in versionedDecoder, version int16) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper, version) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go index a837087..54f431a 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -37,6 +37,10 @@ var ErrShuttingDown = errors.New("kafka: message received by producer in process // ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") +// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing +// a RecordBatch. +var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch") + // PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. 
type PacketEncodingError struct { @@ -71,43 +75,73 @@ type KError int16 // Numeric error codes returned by the Kafka server. const ( - ErrNoError KError = 0 - ErrUnknown KError = -1 - ErrOffsetOutOfRange KError = 1 - ErrInvalidMessage KError = 2 - ErrUnknownTopicOrPartition KError = 3 - ErrInvalidMessageSize KError = 4 - ErrLeaderNotAvailable KError = 5 - ErrNotLeaderForPartition KError = 6 - ErrRequestTimedOut KError = 7 - ErrBrokerNotAvailable KError = 8 - ErrReplicaNotAvailable KError = 9 - ErrMessageSizeTooLarge KError = 10 - ErrStaleControllerEpochCode KError = 11 - ErrOffsetMetadataTooLarge KError = 12 - ErrOffsetsLoadInProgress KError = 14 - ErrConsumerCoordinatorNotAvailable KError = 15 - ErrNotCoordinatorForConsumer KError = 16 - ErrInvalidTopic KError = 17 - ErrMessageSetSizeTooLarge KError = 18 - ErrNotEnoughReplicas KError = 19 - ErrNotEnoughReplicasAfterAppend KError = 20 - ErrInvalidRequiredAcks KError = 21 - ErrIllegalGeneration KError = 22 - ErrInconsistentGroupProtocol KError = 23 - ErrInvalidGroupId KError = 24 - ErrUnknownMemberId KError = 25 - ErrInvalidSessionTimeout KError = 26 - ErrRebalanceInProgress KError = 27 - ErrInvalidCommitOffsetSize KError = 28 - ErrTopicAuthorizationFailed KError = 29 - ErrGroupAuthorizationFailed KError = 30 - ErrClusterAuthorizationFailed KError = 31 + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrNetworkException KError = 13 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 + ErrInvalidRequiredAcks KError = 21 + ErrIllegalGeneration KError = 22 + ErrInconsistentGroupProtocol KError = 23 + ErrInvalidGroupId KError = 24 + ErrUnknownMemberId KError = 25 + ErrInvalidSessionTimeout KError = 26 + ErrRebalanceInProgress KError = 27 + ErrInvalidCommitOffsetSize KError = 28 + ErrTopicAuthorizationFailed KError = 29 + ErrGroupAuthorizationFailed KError = 30 + ErrClusterAuthorizationFailed KError = 31 + ErrInvalidTimestamp KError = 32 + ErrUnsupportedSASLMechanism KError = 33 + ErrIllegalSASLState KError = 34 + ErrUnsupportedVersion KError = 35 + ErrTopicAlreadyExists KError = 36 + ErrInvalidPartitions KError = 37 + ErrInvalidReplicationFactor KError = 38 + ErrInvalidReplicaAssignment KError = 39 + ErrInvalidConfig KError = 40 + ErrNotController KError = 41 + ErrInvalidRequest KError = 42 + ErrUnsupportedForMessageFormat KError = 43 + ErrPolicyViolation KError = 44 + ErrOutOfOrderSequenceNumber KError = 45 + ErrDuplicateSequenceNumber KError = 46 + ErrInvalidProducerEpoch KError = 47 + ErrInvalidTxnState KError = 48 + ErrInvalidProducerIDMapping KError = 49 + ErrInvalidTransactionTimeout KError = 50 + ErrConcurrentTransactions KError = 51 + ErrTransactionCoordinatorFenced KError = 52 + ErrTransactionalIDAuthorizationFailed KError = 53 + ErrSecurityDisabled KError = 54 + ErrOperationNotAttempted KError = 55 + ErrKafkaStorageError KError = 56 + ErrLogDirNotFound KError = 57 + 
ErrSASLAuthenticationFailed KError = 58 + ErrUnknownProducerID KError = 59 + ErrReassignmentInProgress KError = 60 ) func (err KError) Error() string { // Error messages stolen/adapted from - // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + // https://kafka.apache.org/protocol#protocol_error_codes switch err { case ErrNoError: return "kafka server: Not an error, why are you printing me?" @@ -130,13 +164,15 @@ func (err KError) Error() string { case ErrBrokerNotAvailable: return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" case ErrReplicaNotAvailable: - return "kafka server: Replica infomation not available, one or more brokers are down." + return "kafka server: Replica information not available, one or more brokers are down." case ErrMessageSizeTooLarge: return "kafka server: Message was too large, server rejected it to avoid allocation error." case ErrStaleControllerEpochCode: return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." case ErrOffsetMetadataTooLarge: return "kafka server: Specified a string larger than the configured maximum for offset metadata." + case ErrNetworkException: + return "kafka server: The server disconnected before a response was received." case ErrOffsetsLoadInProgress: return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." case ErrConsumerCoordinatorNotAvailable: @@ -173,6 +209,64 @@ func (err KError) Error() string { return "kafka server: The client is not authorized to access this group." case ErrClusterAuthorizationFailed: return "kafka server: The client is not authorized to send this request type." + case ErrInvalidTimestamp: + return "kafka server: The timestamp of the message is out of acceptable range." + case ErrUnsupportedSASLMechanism: + return "kafka server: The broker does not support the requested SASL mechanism." + case ErrIllegalSASLState: + return "kafka server: Request is not valid given the current SASL state." + case ErrUnsupportedVersion: + return "kafka server: The version of API is not supported." + case ErrTopicAlreadyExists: + return "kafka server: Topic with this name already exists." + case ErrInvalidPartitions: + return "kafka server: Number of partitions is invalid." + case ErrInvalidReplicationFactor: + return "kafka server: Replication-factor is invalid." + case ErrInvalidReplicaAssignment: + return "kafka server: Replica assignment is invalid." + case ErrInvalidConfig: + return "kafka server: Configuration is invalid." + case ErrNotController: + return "kafka server: This is not the correct controller for this cluster." + case ErrInvalidRequest: + return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." + case ErrUnsupportedForMessageFormat: + return "kafka server: The requested operation is not supported by the message format version." + case ErrPolicyViolation: + return "kafka server: Request parameters do not satisfy the configured policy." + case ErrOutOfOrderSequenceNumber: + return "kafka server: The broker received an out of order sequence number." + case ErrDuplicateSequenceNumber: + return "kafka server: The broker received a duplicate sequence number." + case ErrInvalidProducerEpoch: + return "kafka server: Producer attempted an operation with an old epoch." 
+ case ErrInvalidTxnState: + return "kafka server: The producer attempted a transactional operation in an invalid state." + case ErrInvalidProducerIDMapping: + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + case ErrInvalidTransactionTimeout: + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + case ErrConcurrentTransactions: + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + case ErrTransactionCoordinatorFenced: + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + case ErrTransactionalIDAuthorizationFailed: + return "kafka server: Transactional ID authorization failed." + case ErrSecurityDisabled: + return "kafka server: Security features are disabled." + case ErrOperationNotAttempted: + return "kafka server: The broker did not attempt to execute this operation." + case ErrKafkaStorageError: + return "kafka server: Disk error when trying to access log file on the disk." + case ErrLogDirNotFound: + return "kafka server: The specified log directory is not found in the broker config." + case ErrSASLAuthenticationFailed: + return "kafka server: SASL Authentication failed." + case ErrUnknownProducerID: + return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." + case ErrReassignmentInProgress: + return "kafka server: A partition reassignment is in progress." } return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go index 3c00fad..8c8e3a5 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -5,37 +5,56 @@ type fetchRequestBlock struct { maxBytes int32 } -func (f *fetchRequestBlock) encode(pe packetEncoder) error { - pe.putInt64(f.fetchOffset) - pe.putInt32(f.maxBytes) +func (b *fetchRequestBlock) encode(pe packetEncoder) error { + pe.putInt64(b.fetchOffset) + pe.putInt32(b.maxBytes) return nil } -func (f *fetchRequestBlock) decode(pd packetDecoder) (err error) { - if f.fetchOffset, err = pd.getInt64(); err != nil { +func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { + if b.fetchOffset, err = pd.getInt64(); err != nil { return err } - if f.maxBytes, err = pd.getInt32(); err != nil { + if b.maxBytes, err = pd.getInt32(); err != nil { return err } return nil } +// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See +// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. 
The KIP is at +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes type FetchRequest struct { MaxWaitTime int32 MinBytes int32 + MaxBytes int32 + Version int16 + Isolation IsolationLevel blocks map[string]map[int32]*fetchRequestBlock } -func (f *FetchRequest) encode(pe packetEncoder) (err error) { +type IsolationLevel int8 + +const ( + ReadUncommitted IsolationLevel = 0 + ReadCommitted IsolationLevel = 1 +) + +func (r *FetchRequest) encode(pe packetEncoder) (err error) { pe.putInt32(-1) // replica ID is always -1 for clients - pe.putInt32(f.MaxWaitTime) - pe.putInt32(f.MinBytes) - err = pe.putArrayLength(len(f.blocks)) + pe.putInt32(r.MaxWaitTime) + pe.putInt32(r.MinBytes) + if r.Version >= 3 { + pe.putInt32(r.MaxBytes) + } + if r.Version >= 4 { + pe.putInt8(int8(r.Isolation)) + } + err = pe.putArrayLength(len(r.blocks)) if err != nil { return err } - for topic, blocks := range f.blocks { + for topic, blocks := range r.blocks { err = pe.putString(topic) if err != nil { return err @@ -55,16 +74,29 @@ func (f *FetchRequest) encode(pe packetEncoder) (err error) { return nil } -func (f *FetchRequest) decode(pd packetDecoder) (err error) { +func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version if _, err = pd.getInt32(); err != nil { return err } - if f.MaxWaitTime, err = pd.getInt32(); err != nil { + if r.MaxWaitTime, err = pd.getInt32(); err != nil { return err } - if f.MinBytes, err = pd.getInt32(); err != nil { + if r.MinBytes, err = pd.getInt32(); err != nil { return err } + if r.Version >= 3 { + if r.MaxBytes, err = pd.getInt32(); err != nil { + return err + } + } + if r.Version >= 4 { + isolation, err := pd.getInt8() + if err != nil { + return err + } + r.Isolation = IsolationLevel(isolation) + } topicCount, err := pd.getArrayLength() if err != nil { return err @@ -72,7 +104,7 @@ func (f *FetchRequest) decode(pd packetDecoder) (err error) { if topicCount == 0 { return nil } - f.blocks = make(map[string]map[int32]*fetchRequestBlock) + r.blocks = make(map[string]map[int32]*fetchRequestBlock) for i := 0; i < topicCount; i++ { topic, err := pd.getString() if err != nil { @@ -82,7 +114,7 @@ func (f *FetchRequest) decode(pd packetDecoder) (err error) { if err != nil { return err } - f.blocks[topic] = make(map[int32]*fetchRequestBlock) + r.blocks[topic] = make(map[int32]*fetchRequestBlock) for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { @@ -90,34 +122,49 @@ func (f *FetchRequest) decode(pd packetDecoder) (err error) { } fetchBlock := &fetchRequestBlock{} if err = fetchBlock.decode(pd); err != nil { - return nil + return err } - f.blocks[topic][partition] = fetchBlock + r.blocks[topic][partition] = fetchBlock } } return nil } -func (f *FetchRequest) key() int16 { +func (r *FetchRequest) key() int16 { return 1 } -func (f *FetchRequest) version() int16 { - return 0 +func (r *FetchRequest) version() int16 { + return r.Version +} + +func (r *FetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4: + return V0_11_0_0 + default: + return minVersion + } } -func (f *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { - if f.blocks == nil { - f.blocks = make(map[string]map[int32]*fetchRequestBlock) +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { + if r.blocks == nil { + 
r.blocks = make(map[string]map[int32]*fetchRequestBlock) } - if f.blocks[topic] == nil { - f.blocks[topic] = make(map[int32]*fetchRequestBlock) + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*fetchRequestBlock) } tmp := new(fetchRequestBlock) tmp.maxBytes = maxBytes tmp.fetchOffset = fetchOffset - f.blocks[topic][partitionID] = tmp + r.blocks[topic][partitionID] = tmp } diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index 1ac5439..86fa549 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -1,61 +1,142 @@ package sarama +import "time" + +type AbortedTransaction struct { + ProducerID int64 + FirstOffset int64 +} + +func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if t.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + return nil +} + +func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { + pe.putInt64(t.ProducerID) + pe.putInt64(t.FirstOffset) + + return nil +} + type FetchResponseBlock struct { Err KError HighWaterMarkOffset int64 - MsgSet MessageSet + LastStableOffset int64 + AbortedTransactions []*AbortedTransaction + Records Records } -func (pr *FetchResponseBlock) decode(pd packetDecoder) (err error) { +func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err } - pr.Err = KError(tmp) + b.Err = KError(tmp) - pr.HighWaterMarkOffset, err = pd.getInt64() + b.HighWaterMarkOffset, err = pd.getInt64() if err != nil { return err } - msgSetSize, err := pd.getInt32() + if version >= 4 { + b.LastStableOffset, err = pd.getInt64() + if err != nil { + return err + } + + numTransact, err := pd.getArrayLength() + if err != nil { + return err + } + + if numTransact >= 0 { + b.AbortedTransactions = make([]*AbortedTransaction, numTransact) + } + + for i := 0; i < numTransact; i++ { + transact := new(AbortedTransaction) + if err = transact.decode(pd); err != nil { + return err + } + b.AbortedTransactions[i] = transact + } + } + + recordsSize, err := pd.getInt32() if err != nil { return err } - msgSetDecoder, err := pd.getSubset(int(msgSetSize)) + recordsDecoder, err := pd.getSubset(int(recordsSize)) if err != nil { return err } - err = (&pr.MsgSet).decode(msgSetDecoder) + if recordsSize > 0 { + if err = b.Records.decode(recordsDecoder); err != nil { + return err + } + } - return err + return nil } -type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock -} +func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) -func (pr *FetchResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(pr.Err)) + pe.putInt64(b.HighWaterMarkOffset) - pe.putInt64(pr.HighWaterMarkOffset) + if version >= 4 { + pe.putInt64(b.LastStableOffset) + + if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { + return err + } + for _, transact := range b.AbortedTransactions { + if err = transact.encode(pe); err != nil { + return err + } + } + } pe.push(&lengthField{}) - err = pr.MsgSet.encode(pe) + err = b.Records.encode(pe) if err != nil { return err } return pe.pop() } -func (fr *FetchResponse) decode(pd packetDecoder) (err error) { +type FetchResponse struct { + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + Version int16 // v1 requires 
0.9+, v2 requires 0.10+ +} + +func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.Version >= 1 { + throttle, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttle) * time.Millisecond + } + numTopics, err := pd.getArrayLength() if err != nil { return err } - fr.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) + r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) for i := 0; i < numTopics; i++ { name, err := pd.getString() if err != nil { @@ -67,7 +148,7 @@ func (fr *FetchResponse) decode(pd packetDecoder) (err error) { return err } - fr.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) + r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) for j := 0; j < numBlocks; j++ { id, err := pd.getInt32() @@ -76,24 +157,28 @@ func (fr *FetchResponse) decode(pd packetDecoder) (err error) { } block := new(FetchResponseBlock) - err = block.decode(pd) + err = block.decode(pd, version) if err != nil { return err } - fr.Blocks[name][id] = block + r.Blocks[name][id] = block } } return nil } -func (fr *FetchResponse) encode(pe packetEncoder) (err error) { - err = pe.putArrayLength(len(fr.Blocks)) +func (r *FetchResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + + err = pe.putArrayLength(len(r.Blocks)) if err != nil { return err } - for topic, partitions := range fr.Blocks { + for topic, partitions := range r.Blocks { err = pe.putString(topic) if err != nil { return err @@ -106,7 +191,7 @@ func (fr *FetchResponse) encode(pe packetEncoder) (err error) { for id, block := range partitions { pe.putInt32(id) - err = block.encode(pe) + err = block.encode(pe, r.Version) if err != nil { return err } @@ -116,26 +201,49 @@ func (fr *FetchResponse) encode(pe packetEncoder) (err error) { return nil } -func (fr *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { - if fr.Blocks == nil { +func (r *FetchResponse) key() int16 { + return 1 +} + +func (r *FetchResponse) version() int16 { + return r.Version +} + +func (r *FetchResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4: + return V0_11_0_0 + default: + return minVersion + } +} + +func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { return nil } - if fr.Blocks[topic] == nil { + if r.Blocks[topic] == nil { return nil } - return fr.Blocks[topic][partition] + return r.Blocks[topic][partition] } -func (fr *FetchResponse) AddError(topic string, partition int32, err KError) { - if fr.Blocks == nil { - fr.Blocks = make(map[string]map[int32]*FetchResponseBlock) +func (r *FetchResponse) AddError(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) } - partitions, ok := fr.Blocks[topic] + partitions, ok := r.Blocks[topic] if !ok { partitions = make(map[int32]*FetchResponseBlock) - fr.Blocks[topic] = partitions + r.Blocks[topic] = partitions } frb, ok := partitions[partition] if !ok { @@ -145,20 +253,25 @@ func (fr *FetchResponse) AddError(topic string, partition int32, err KError) { frb.Err = err } -func (fr *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { - if fr.Blocks == nil { - fr.Blocks = make(map[string]map[int32]*FetchResponseBlock) 
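Since the vendored FetchRequest/FetchResponse now carry an explicit Version, plus the request-level MaxBytes (v3+) and Isolation (v4+) fields shown in this hunk, a short sketch of how those fields fit together may help. This is a minimal illustration using only the exported names visible in the diff (FetchRequest, AddBlock, ReadCommitted); the topic, partition, and byte limits are placeholder values, not anything this repository configures.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Version 3 adds the request-wide MaxBytes cap (KIP-74); Version 4 adds
	// the Isolation field for transactional reads and, per requiredVersion(),
	// needs a 0.11.0.0 broker.
	req := &sarama.FetchRequest{
		Version:     4,
		MaxWaitTime: 250,                  // ms to wait for MinBytes to accumulate
		MinBytes:    1,
		MaxBytes:    1024 * 1024,          // only encoded when Version >= 3
		Isolation:   sarama.ReadCommitted, // only encoded when Version >= 4
	}

	// One block per (topic, partition): start at offset 0, cap at 32 KiB.
	req.AddBlock("example-topic", 0, 0, 32*1024)

	fmt.Println("encoding fetch request as version", req.Version)
}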
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) } - partitions, ok := fr.Blocks[topic] + partitions, ok := r.Blocks[topic] if !ok { partitions = make(map[int32]*FetchResponseBlock) - fr.Blocks[topic] = partitions + r.Blocks[topic] = partitions } frb, ok := partitions[partition] if !ok { frb = new(FetchResponseBlock) partitions[partition] = frb } + + return frb +} + +func encodeKV(key, value Encoder) ([]byte, []byte) { var kb []byte var vb []byte if key != nil { @@ -167,7 +280,46 @@ func (fr *FetchResponse) AddMessage(topic string, partition int32, key, value En if value != nil { vb, _ = value.Encode() } + + return kb, vb +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) msg := &Message{Key: kb, Value: vb} msgBlock := &MessageBlock{Msg: msg, Offset: offset} - frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock) + set := frb.Records.msgSet + if set == nil { + set = &MessageSet{} + frb.Records = newLegacyRecords(set) + } + set.Messages = append(set.Messages, msgBlock) +} + +func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + rec := &Record{Key: kb, Value: vb, OffsetDelta: offset} + batch := frb.Records.recordBatch + if batch == nil { + batch = &RecordBatch{Version: 2} + frb.Records = newDefaultRecords(batch) + } + batch.addRecord(rec) +} + +func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { + frb := r.getOrCreateBlock(topic, partition) + batch := frb.Records.recordBatch + if batch == nil { + batch = &RecordBatch{Version: 2} + frb.Records = newDefaultRecords(batch) + } + batch.LastOffsetDelta = offset +} + +func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + frb.LastStableOffset = offset } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go index b89d290..ce49c47 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -20,7 +20,7 @@ func (r *HeartbeatRequest) encode(pe packetEncoder) error { return nil } -func (r *HeartbeatRequest) decode(pd packetDecoder) (err error) { +func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { if r.GroupId, err = pd.getString(); err != nil { return } @@ -41,3 +41,7 @@ func (r *HeartbeatRequest) key() int16 { func (r *HeartbeatRequest) version() int16 { return 0 } + +func (r *HeartbeatRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go index b48b8c1..766f5fd 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -9,12 +9,24 @@ func (r *HeartbeatResponse) encode(pe packetEncoder) error { return nil } -func (r *HeartbeatResponse) decode(pd packetDecoder) error { - if kerr, err := pd.getInt16(); err != nil { +func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = 
KError(kerr) } + r.Err = KError(kerr) return nil } + +func (r *HeartbeatResponse) key() int16 { + return 12 +} + +func (r *HeartbeatResponse) version() int16 { + return 0 +} + +func (r *HeartbeatResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go index 5884d79..3a7ba17 100644 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -1,11 +1,36 @@ package sarama +type GroupProtocol struct { + Name string + Metadata []byte +} + +func (p *GroupProtocol) decode(pd packetDecoder) (err error) { + p.Name, err = pd.getString() + if err != nil { + return err + } + p.Metadata, err = pd.getBytes() + return err +} + +func (p *GroupProtocol) encode(pe packetEncoder) (err error) { + if err := pe.putString(p.Name); err != nil { + return err + } + if err := pe.putBytes(p.Metadata); err != nil { + return err + } + return nil +} + type JoinGroupRequest struct { - GroupId string - SessionTimeout int32 - MemberId string - ProtocolType string - GroupProtocols map[string][]byte + GroupId string + SessionTimeout int32 + MemberId string + ProtocolType string + GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + OrderedGroupProtocols []*GroupProtocol } func (r *JoinGroupRequest) encode(pe packetEncoder) error { @@ -20,22 +45,37 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error { return err } - if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { - return err - } - for name, metadata := range r.GroupProtocols { - if err := pe.putString(name); err != nil { + if len(r.GroupProtocols) > 0 { + if len(r.OrderedGroupProtocols) > 0 { + return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} + } + + if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { return err } - if err := pe.putBytes(metadata); err != nil { + for name, metadata := range r.GroupProtocols { + if err := pe.putString(name); err != nil { + return err + } + if err := pe.putBytes(metadata); err != nil { + return err + } + } + } else { + if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { return err } + for _, protocol := range r.OrderedGroupProtocols { + if err := protocol.encode(pe); err != nil { + return err + } + } } return nil } -func (r *JoinGroupRequest) decode(pd packetDecoder) (err error) { +func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { if r.GroupId, err = pd.getString(); err != nil { return } @@ -62,16 +102,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder) (err error) { r.GroupProtocols = make(map[string][]byte) for i := 0; i < n; i++ { - name, err := pd.getString() - if err != nil { + protocol := &GroupProtocol{} + if err := protocol.decode(pd); err != nil { return err } - metadata, err := pd.getBytes() - if err != nil { - return err - } - - r.GroupProtocols[name] = metadata + r.GroupProtocols[protocol.Name] = protocol.Metadata + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) } return nil @@ -85,16 +121,19 @@ func (r *JoinGroupRequest) version() int16 { return 0 } -func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { - if r.GroupProtocols == nil { - r.GroupProtocols = make(map[string][]byte) - } +func (r *JoinGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} - r.GroupProtocols[name] = metadata +func (r 
*JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ + Name: name, + Metadata: metadata, + }) } func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { - bin, err := encode(metadata) + bin, err := encode(metadata, nil) if err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go index 16f6b9b..6d35fe3 100644 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -52,13 +52,14 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error { return nil } -func (r *JoinGroupResponse) decode(pd packetDecoder) (err error) { - if kerr, err := pd.getInt16(); err != nil { +func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) + if r.GenerationId, err = pd.getInt32(); err != nil { return } @@ -100,3 +101,15 @@ func (r *JoinGroupResponse) decode(pd packetDecoder) (err error) { return nil } + +func (r *JoinGroupResponse) key() int16 { + return 11 +} + +func (r *JoinGroupResponse) version() int16 { + return 0 +} + +func (r *JoinGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go index cdb4d14..e177427 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -16,7 +16,7 @@ func (r *LeaveGroupRequest) encode(pe packetEncoder) error { return nil } -func (r *LeaveGroupRequest) decode(pd packetDecoder) (err error) { +func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { if r.GroupId, err = pd.getString(); err != nil { return } @@ -34,3 +34,7 @@ func (r *LeaveGroupRequest) key() int16 { func (r *LeaveGroupRequest) version() int16 { return 0 } + +func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go index bad1dba..d60c626 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -9,12 +9,24 @@ func (r *LeaveGroupResponse) encode(pe packetEncoder) error { return nil } -func (r *LeaveGroupResponse) decode(pd packetDecoder) (err error) { - if kerr, err := pd.getInt16(); err != nil { +func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) return nil } + +func (r *LeaveGroupResponse) key() int16 { + return 13 +} + +func (r *LeaveGroupResponse) version() int16 { + return 0 +} + +func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go index 70078be..576b1a6 100644 --- a/vendor/github.com/Shopify/sarama/length_field.go +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -27,3 +27,43 @@ func (l *lengthField) check(curOffset int, buf []byte) error { return nil } + +type varintLengthField struct { + 
startOffset int + length int64 +} + +func (l *varintLengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getVarint() + return err +} + +func (l *varintLengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *varintLengthField) adjustLength(currOffset int) int { + oldFieldSize := l.reserveLength() + l.length = int64(currOffset - l.startOffset - oldFieldSize) + + return l.reserveLength() - oldFieldSize +} + +func (l *varintLengthField) reserveLength() int { + var tmp [binary.MaxVarintLen64]byte + return binary.PutVarint(tmp[:], l.length) +} + +func (l *varintLengthField) run(curOffset int, buf []byte) error { + binary.PutVarint(buf[l.startOffset:], l.length) + return nil +} + +func (l *varintLengthField) check(curOffset int, buf []byte) error { + if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { + return PacketDecodingError{"length field invalid"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go index 4d74c26..3b16abf 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -7,7 +7,7 @@ func (r *ListGroupsRequest) encode(pe packetEncoder) error { return nil } -func (r *ListGroupsRequest) decode(pd packetDecoder) (err error) { +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { return nil } @@ -18,3 +18,7 @@ func (r *ListGroupsRequest) key() int16 { func (r *ListGroupsRequest) version() int16 { return 0 } + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go index 2f53149..56115d4 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_response.go +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -23,13 +23,14 @@ func (r *ListGroupsResponse) encode(pe packetEncoder) error { return nil } -func (r *ListGroupsResponse) decode(pd packetDecoder) error { - if kerr, err := pd.getInt16(); err != nil { +func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) + n, err := pd.getArrayLength() if err != nil { return err @@ -54,3 +55,15 @@ func (r *ListGroupsResponse) decode(pd packetDecoder) error { return nil } + +func (r *ListGroupsResponse) key() int16 { + return 16 +} + +func (r *ListGroupsResponse) version() int16 { + return 0 +} + +func (r *ListGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go index c4bdb9e..bd5650b 100644 --- a/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/Shopify/sarama/message.go @@ -5,6 +5,10 @@ import ( "compress/gzip" "fmt" "io/ioutil" + "time" + + "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" ) // CompressionCodec represents the various compression codecs recognized by Kafka in messages. @@ -17,29 +21,35 @@ const ( CompressionNone CompressionCodec = 0 CompressionGZIP CompressionCodec = 1 CompressionSnappy CompressionCodec = 2 + CompressionLZ4 CompressionCodec = 3 ) -// The spec just says: "This is a version id used to allow backwards compatible evolution of the message -// binary format." 
but it doesn't say what the current value is, so presumably 0... -const messageFormat int8 = 0 - type Message struct { - Codec CompressionCodec // codec used to compress the message contents - Key []byte // the message key, may be nil - Value []byte // the message contents - Set *MessageSet // the message set a message might wrap + Codec CompressionCodec // codec used to compress the message contents + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) compressedCache []byte + compressedSize int // used for computing the compression ratio metrics } func (m *Message) encode(pe packetEncoder) error { - pe.push(&crc32Field{}) + pe.push(newCRC32Field(crcIEEE)) - pe.putInt8(messageFormat) + pe.putInt8(m.Version) attributes := int8(m.Codec) & compressionCodecMask pe.putInt8(attributes) + if m.Version >= 1 { + if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { + return err + } + } + err := pe.putBytes(m.Key) if err != nil { return err @@ -50,7 +60,7 @@ func (m *Message) encode(pe packetEncoder) error { if m.compressedCache != nil { payload = m.compressedCache m.compressedCache = nil - } else { + } else if m.Value != nil { switch m.Codec { case CompressionNone: payload = m.Value @@ -66,12 +76,26 @@ func (m *Message) encode(pe packetEncoder) error { m.compressedCache = buf.Bytes() payload = m.compressedCache case CompressionSnappy: - tmp := snappyEncode(m.Value) + tmp := snappy.Encode(m.Value) m.compressedCache = tmp payload = m.compressedCache + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + default: return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} } + // Keep in mind the compressed payload size for metric gathering + m.compressedSize = len(payload) } if err = pe.putBytes(payload); err != nil { @@ -82,17 +106,18 @@ func (m *Message) encode(pe packetEncoder) error { } func (m *Message) decode(pd packetDecoder) (err error) { - err = pd.push(&crc32Field{}) + err = pd.push(newCRC32Field(crcIEEE)) if err != nil { return err } - format, err := pd.getInt8() + m.Version, err = pd.getInt8() if err != nil { return err } - if format != messageFormat { - return PacketDecodingError{"unexpected messageFormat"} + + if m.Version > 1 { + return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} } attribute, err := pd.getInt8() @@ -101,6 +126,12 @@ func (m *Message) decode(pd packetDecoder) (err error) { } m.Codec = CompressionCodec(attribute & compressionCodecMask) + if m.Version == 1 { + if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { + return err + } + } + m.Key, err = pd.getBytes() if err != nil { return err @@ -111,12 +142,16 @@ func (m *Message) decode(pd packetDecoder) (err error) { return err } + // Required for deep equal assertion during tests but might be useful + // for future metrics about the compression ratio in fetch requests + m.compressedSize = len(m.Value) + switch m.Codec { case CompressionNone: // nothing to do case CompressionGZIP: if m.Value == nil { - return PacketDecodingError{"GZIP compression specified, but no data to uncompress"} + break } reader, err := gzip.NewReader(bytes.NewReader(m.Value)) 
if err != nil { @@ -130,14 +165,26 @@ func (m *Message) decode(pd packetDecoder) (err error) { } case CompressionSnappy: if m.Value == nil { - return PacketDecodingError{"Snappy compression specified, but no data to uncompress"} + break } - if m.Value, err = snappyDecode(m.Value); err != nil { + if m.Value, err = snappy.Decode(m.Value); err != nil { return err } if err := m.decodeSet(); err != nil { return err } + case CompressionLZ4: + if m.Value == nil { + break + } + reader := lz4.NewReader(bytes.NewReader(m.Value)) + if m.Value, err = ioutil.ReadAll(reader); err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + default: return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} } diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go index 130cfd4..9a26b55 100644 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -4,14 +4,14 @@ type MetadataRequest struct { Topics []string } -func (mr *MetadataRequest) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(mr.Topics)) +func (r *MetadataRequest) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Topics)) if err != nil { return err } - for i := range mr.Topics { - err = pe.putString(mr.Topics[i]) + for i := range r.Topics { + err = pe.putString(r.Topics[i]) if err != nil { return err } @@ -19,7 +19,7 @@ func (mr *MetadataRequest) encode(pe packetEncoder) error { return nil } -func (mr *MetadataRequest) decode(pd packetDecoder) error { +func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { topicCount, err := pd.getArrayLength() if err != nil { return err @@ -28,21 +28,25 @@ func (mr *MetadataRequest) decode(pd packetDecoder) error { return nil } - mr.Topics = make([]string, topicCount) - for i := range mr.Topics { + r.Topics = make([]string, topicCount) + for i := range r.Topics { topic, err := pd.getString() if err != nil { return err } - mr.Topics[i] = topic + r.Topics[i] = topic } return nil } -func (mr *MetadataRequest) key() int16 { +func (r *MetadataRequest) key() int16 { return 3 } -func (mr *MetadataRequest) version() int16 { +func (r *MetadataRequest) version() int16 { return 0 } + +func (r *MetadataRequest) requiredVersion() KafkaVersion { + return minVersion +} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go index b82221f..f9d6a42 100644 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -118,16 +118,16 @@ type MetadataResponse struct { Topics []*TopicMetadata } -func (m *MetadataResponse) decode(pd packetDecoder) (err error) { +func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { n, err := pd.getArrayLength() if err != nil { return err } - m.Brokers = make([]*Broker, n) + r.Brokers = make([]*Broker, n) for i := 0; i < n; i++ { - m.Brokers[i] = new(Broker) - err = m.Brokers[i].decode(pd) + r.Brokers[i] = new(Broker) + err = r.Brokers[i].decode(pd) if err != nil { return err } @@ -138,10 +138,10 @@ func (m *MetadataResponse) decode(pd packetDecoder) (err error) { return err } - m.Topics = make([]*TopicMetadata, n) + r.Topics = make([]*TopicMetadata, n) for i := 0; i < n; i++ { - m.Topics[i] = new(TopicMetadata) - err = m.Topics[i].decode(pd) + r.Topics[i] = new(TopicMetadata) + err = r.Topics[i].decode(pd) if err != 
nil { return err } @@ -150,23 +150,23 @@ func (m *MetadataResponse) decode(pd packetDecoder) (err error) { return nil } -func (m *MetadataResponse) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(m.Brokers)) +func (r *MetadataResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Brokers)) if err != nil { return err } - for _, broker := range m.Brokers { + for _, broker := range r.Brokers { err = broker.encode(pe) if err != nil { return err } } - err = pe.putArrayLength(len(m.Topics)) + err = pe.putArrayLength(len(r.Topics)) if err != nil { return err } - for _, tm := range m.Topics { + for _, tm := range r.Topics { err = tm.encode(pe) if err != nil { return err @@ -176,16 +176,28 @@ func (m *MetadataResponse) encode(pe packetEncoder) error { return nil } +func (r *MetadataResponse) key() int16 { + return 3 +} + +func (r *MetadataResponse) version() int16 { + return 0 +} + +func (r *MetadataResponse) requiredVersion() KafkaVersion { + return minVersion +} + // testing API -func (m *MetadataResponse) AddBroker(addr string, id int32) { - m.Brokers = append(m.Brokers, &Broker{id: id, addr: addr}) +func (r *MetadataResponse) AddBroker(addr string, id int32) { + r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) } -func (m *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { +func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { var tmatch *TopicMetadata - for _, tm := range m.Topics { + for _, tm := range r.Topics { if tm.Name == topic { tmatch = tm goto foundTopic @@ -194,7 +206,7 @@ func (m *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { tmatch = new(TopicMetadata) tmatch.Name = topic - m.Topics = append(m.Topics, tmatch) + r.Topics = append(r.Topics, tmatch) foundTopic: @@ -202,8 +214,8 @@ foundTopic: return tmatch } -func (m *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { - tmatch := m.AddTopic(topic, ErrNoError) +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { + tmatch := r.AddTopic(topic, ErrNoError) var pmatch *PartitionMetadata for _, pm := range tmatch.Partitions { diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go new file mode 100644 index 0000000..4869708 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -0,0 +1,51 @@ +package sarama + +import ( + "fmt" + "strings" + + "github.com/rcrowley/go-metrics" +) + +// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: +// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, +// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. +// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38 +const ( + metricsReservoirSize = 1028 + metricsAlphaFactor = 0.015 +) + +func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram { + return r.GetOrRegister(name, func() metrics.Histogram { + return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor)) + }).(metrics.Histogram) +} + +func getMetricNameForBroker(name string, broker *Broker) string { + // Use broker id like the Java client as it does not contain '.' 
or ':' characters that + // can be interpreted as special character by monitoring tool (e.g. Graphite) + return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) +} + +func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter { + return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r) +} + +func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram { + return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r) +} + +func getMetricNameForTopic(name string, topic string) string { + // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy + // cf. KAFKA-1902 and KAFKA-2337 + return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1)) +} + +func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter { + return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r) +} + +func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram { + return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r) +} diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go index c422565..55ef1e2 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -20,6 +20,10 @@ const ( type requestHandlerFunc func(req *request) (res encoder) +// RequestNotifierFunc is invoked when a mock broker processes a request successfully +// and will provides the number of bytes read and written. +type RequestNotifierFunc func(bytesRead, bytesWritten int) + // MockBroker is a mock Kafka broker that is used in unit tests. It is exposed // to facilitate testing of higher level or specialized consumers and producers // built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol, @@ -54,13 +58,14 @@ type MockBroker struct { t TestReporter latency time.Duration handler requestHandlerFunc + notifier RequestNotifierFunc history []RequestResponse lock sync.Mutex } // RequestResponse represents a Request/Response pair processed by MockBroker. type RequestResponse struct { - Request requestBody + Request protocolBody Response encoder } @@ -85,6 +90,14 @@ func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { }) } +// SetNotifier set a function that will get invoked whenever a request has been +// processed successfully and will provide the number of bytes read and written +func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) { + b.lock.Lock() + b.notifier = notifier + b.lock.Unlock() +} + // BrokerID returns broker ID assigned to the broker. 
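The metric-name helpers added above are unexported, but the naming scheme they encode (a "-for-broker-<id>" suffix, and topic names with dots rewritten to underscores for Graphite-style reporters) is easy to mirror when reading Sarama's metrics out of a go-metrics registry. A small standalone sketch of that convention follows; the metric name "record-send-rate" and the topic are purely illustrative.

package main

import (
	"fmt"
	"strings"

	"github.com/rcrowley/go-metrics"
)

// topicMetricName mirrors the convention used by the vendored helper above:
// dots are replaced with underscores so hierarchical reporters (e.g. Graphite)
// do not split the topic into nested paths (cf. KAFKA-1902, KAFKA-2337).
func topicMetricName(name, topic string) string {
	return fmt.Sprintf("%s-for-topic-%s", name, strings.Replace(topic, ".", "_", -1))
}

func main() {
	registry := metrics.NewRegistry()

	// Record a few events against a dotted topic name and read them back.
	meter := metrics.GetOrRegisterMeter(topicMetricName("record-send-rate", "firehose.raw"), registry)
	meter.Mark(3)

	fmt.Println(topicMetricName("record-send-rate", "firehose.raw"), "=", meter.Count())
}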
func (b *MockBroker) BrokerID() int32 { return b.brokerID @@ -180,7 +193,7 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) resHeader := make([]byte, 8) for { - req, err := decodeRequest(conn) + req, bytesRead, err := decodeRequest(conn) if err != nil { Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) b.serverError(err) @@ -202,12 +215,17 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) } Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) - encodedRes, err := encode(res) + encodedRes, err := encode(res, nil) if err != nil { b.serverError(err) break } if len(encodedRes) == 0 { + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, 0) + } + b.lock.Unlock() continue } @@ -221,6 +239,12 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) b.serverError(err) break } + + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, len(resHeader)+len(encodedRes)) + } + b.lock.Unlock() } Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) } @@ -264,6 +288,15 @@ func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { // NewMockBrokerAddr behaves like newMockBroker but listens on the address you give // it rather than just some ephemeral port. func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { + listener, err := net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + return NewMockBrokerListener(t, brokerID, listener) +} + +// NewMockBrokerListener behaves like newMockBrokerAddr but accepts connections on the listener specified. +func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker { var err error broker := &MockBroker{ @@ -272,13 +305,10 @@ func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker t: t, brokerID: brokerID, expectations: make(chan encoder, 512), + listener: listener, } broker.handler = broker.defaultRequestHandler - broker.listener, err = net.Listen("tcp", addr) - if err != nil { - t.Fatal(err) - } Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) if err != nil { diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index 2f76df4..f79a9d5 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -17,7 +17,7 @@ type TestReporter interface { // allows generating a response based on a request body. MockResponses are used // to program behavior of MockBroker in tests. 
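The new SetNotifier hook gives tests visibility into how many bytes the mock broker read and wrote for each request it served. A hedged sketch of how a test might wire it up, following the common mock-broker pattern of queueing a MetadataResponse for the client's bootstrap request; Returns, Addr, and sarama.NewClient are not shown in this hunk and are assumed from the upstream API, and the test name is illustrative rather than taken from this repository.

package main

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestMockBrokerTraffic(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	// Log the wire-level traffic for every request the mock broker serves.
	broker.SetNotifier(func(bytesRead, bytesWritten int) {
		t.Logf("mock broker served a request: read=%d bytes, wrote=%d bytes", bytesRead, bytesWritten)
	})

	// The client sends a MetadataRequest on startup; answer it with an empty response.
	broker.Returns(new(sarama.MetadataResponse))

	client, err := sarama.NewClient([]string{broker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}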
type MockResponse interface { - For(reqBody decoder) (res encoder) + For(reqBody versionedDecoder) (res encoder) } // MockWrapper is a mock response builder that returns a particular concrete @@ -26,7 +26,7 @@ type MockWrapper struct { res encoder } -func (mw *MockWrapper) For(reqBody decoder) (res encoder) { +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { return mw.res } @@ -58,7 +58,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { return ms } -func (mc *MockSequence) For(reqBody decoder) (res encoder) { +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { res = mc.responses[0].For(reqBody) if len(mc.responses) > 1 { mc.responses = mc.responses[1:] @@ -96,7 +96,7 @@ func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMet return mmr } -func (mmr *MockMetadataResponse) For(reqBody decoder) encoder { +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { metadataRequest := reqBody.(*MetadataRequest) metadataResponse := &MetadataResponse{} for addr, brokerID := range mmr.brokers { @@ -122,6 +122,7 @@ func (mmr *MockMetadataResponse) For(reqBody decoder) encoder { type MockOffsetResponse struct { offsets map[string]map[int32]map[int64]int64 t TestReporter + version int16 } func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { @@ -131,6 +132,11 @@ func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { } } +func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { + mor.version = version + return mor +} + func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { partitions := mor.offsets[topic] if partitions == nil { @@ -146,9 +152,9 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of return mor } -func (mor *MockOffsetResponse) For(reqBody decoder) encoder { +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { offsetRequest := reqBody.(*OffsetRequest) - offsetResponse := &OffsetResponse{} + offsetResponse := &OffsetResponse{Version: mor.version} for topic, partitions := range offsetRequest.blocks { for partition, block := range partitions { offset := mor.getOffset(topic, partition, block.time) @@ -180,6 +186,7 @@ type MockFetchResponse struct { highWaterMarks map[string]map[int32]int64 t TestReporter batchSize int + version int16 } func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { @@ -191,6 +198,11 @@ func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { } } +func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { + mfr.version = version + return mfr +} + func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { partitions := mfr.messages[topic] if partitions == nil { @@ -216,9 +228,11 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of return mfr } -func (mfr *MockFetchResponse) For(reqBody decoder) encoder { +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { fetchRequest := reqBody.(*FetchRequest) - res := &FetchResponse{} + res := &FetchResponse{ + Version: mfr.version, + } for topic, partitions := range fetchRequest.blocks { for partition, block := range partitions { initialOffset := block.fetchOffset @@ -298,7 +312,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M return mr } -func (mr *MockConsumerMetadataResponse) 
For(reqBody decoder) encoder { +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { req := reqBody.(*ConsumerMetadataRequest) group := req.ConsumerGroup res := &ConsumerMetadataResponse{} @@ -340,7 +354,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3 return mr } -func (mr *MockOffsetCommitResponse) For(reqBody decoder) encoder { +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { req := reqBody.(*OffsetCommitRequest) group := req.ConsumerGroup res := &OffsetCommitResponse{} @@ -370,14 +384,20 @@ func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int3 // MockProduceResponse is a `ProduceResponse` builder. type MockProduceResponse struct { - errors map[string]map[int32]KError - t TestReporter + version int16 + errors map[string]map[int32]KError + t TestReporter } func NewMockProduceResponse(t TestReporter) *MockProduceResponse { return &MockProduceResponse{t: t} } +func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse { + mr.version = version + return mr +} + func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { if mr.errors == nil { mr.errors = make(map[string]map[int32]KError) @@ -391,10 +411,12 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE return mr } -func (mr *MockProduceResponse) For(reqBody decoder) encoder { +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { req := reqBody.(*ProduceRequest) - res := &ProduceResponse{} - for topic, partitions := range req.msgSets { + res := &ProduceResponse{ + Version: mr.version, + } + for topic, partitions := range req.records { for partition := range partitions { res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) } @@ -442,7 +464,7 @@ func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int3 return mr } -func (mr *MockOffsetFetchResponse) For(reqBody decoder) encoder { +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { req := reqBody.(*OffsetFetchRequest) group := req.ConsumerGroup res := &OffsetFetchResponse{} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go index f518152..b21ea63 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -16,27 +16,27 @@ type offsetCommitRequestBlock struct { metadata string } -func (r *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(r.offset) +func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.offset) if version == 1 { - pe.putInt64(r.timestamp) - } else if r.timestamp != 0 { + pe.putInt64(b.timestamp) + } else if b.timestamp != 0 { Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") } - return pe.putString(r.metadata) + return pe.putString(b.metadata) } -func (r *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { - if r.offset, err = pd.getInt64(); err != nil { +func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.offset, err = pd.getInt64(); err != nil { return err } if version == 1 { - if r.timestamp, err = pd.getInt64(); err != nil { + if b.timestamp, err = pd.getInt64(); err != nil { return err } } - r.metadata, err = 
pd.getString() + b.metadata, err = pd.getString() return err } @@ -49,7 +49,7 @@ type OffsetCommitRequest struct { // Version can be: // - 0 (kafka 0.8.1 and later) // - 1 (kafka 0.8.2 and later) - // - 2 (kafka 0.8.3 and later) + // - 2 (kafka 0.9.0 and later) Version int16 blocks map[string]map[int32]*offsetCommitRequestBlock } @@ -103,7 +103,9 @@ func (r *OffsetCommitRequest) encode(pe packetEncoder) error { return nil } -func (r *OffsetCommitRequest) decode(pd packetDecoder) (err error) { +func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ConsumerGroup, err = pd.getString(); err != nil { return err } @@ -164,6 +166,17 @@ func (r *OffsetCommitRequest) version() int16 { return r.Version } +func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + default: + return minVersion + } +} + func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go index 573a3b6..7f277e7 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -35,7 +35,7 @@ func (r *OffsetCommitResponse) encode(pe packetEncoder) error { return nil } -func (r *OffsetCommitResponse) decode(pd packetDecoder) (err error) { +func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { numTopics, err := pd.getArrayLength() if err != nil || numTopics == 0 { return err @@ -71,3 +71,15 @@ func (r *OffsetCommitResponse) decode(pd packetDecoder) (err error) { return nil } + +func (r *OffsetCommitResponse) key() int16 { + return 8 +} + +func (r *OffsetCommitResponse) version() int16 { + return 0 +} + +func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { + return minVersion +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go index 30bbbbb..b19fe79 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -28,7 +28,8 @@ func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { return nil } -func (r *OffsetFetchRequest) decode(pd packetDecoder) (err error) { +func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version if r.ConsumerGroup, err = pd.getString(); err != nil { return err } @@ -62,6 +63,15 @@ func (r *OffsetFetchRequest) version() int16 { return r.Version } +func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + default: + return minVersion + } +} + func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { if r.partitions == nil { r.partitions = make(map[string][]int32) diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go index 93078c3..323220e 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -6,13 +6,13 @@ type OffsetFetchResponseBlock struct { Err KError } -func (r *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { - r.Offset, err 
= pd.getInt64() +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { + b.Offset, err = pd.getInt64() if err != nil { return err } - r.Metadata, err = pd.getString() + b.Metadata, err = pd.getString() if err != nil { return err } @@ -21,20 +21,20 @@ func (r *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { if err != nil { return err } - r.Err = KError(tmp) + b.Err = KError(tmp) return nil } -func (r *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt64(r.Offset) +func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt64(b.Offset) - err = pe.putString(r.Metadata) + err = pe.putString(b.Metadata) if err != nil { return err } - pe.putInt16(int16(r.Err)) + pe.putInt16(int16(b.Err)) return nil } @@ -64,7 +64,7 @@ func (r *OffsetFetchResponse) encode(pe packetEncoder) error { return nil } -func (r *OffsetFetchResponse) decode(pd packetDecoder) (err error) { +func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { numTopics, err := pd.getArrayLength() if err != nil || numTopics == 0 { return err @@ -106,6 +106,18 @@ func (r *OffsetFetchResponse) decode(pd packetDecoder) (err error) { return nil } +func (r *OffsetFetchResponse) key() int16 { + return 9 +} + +func (r *OffsetFetchResponse) version() int16 { + return 0 +} + +func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { + return minVersion +} + func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { if r.Blocks == nil { return nil diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go index 0cac648..6c01f95 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -136,17 +136,28 @@ type PartitionOffsetManager interface { // was committed for this partition yet. NextOffset() (int64, string) - // MarkOffset marks the provided offset as processed, alongside a metadata string + // MarkOffset marks the provided offset, alongside a metadata string // that represents the state of the partition consumer at that point in time. The // metadata string can be used by another consumer to restore that state, so it // can resume consumption. // + // To follow upstream conventions, you are expected to mark the offset of the + // next message to read, not the last message read. Thus, when calling `MarkOffset` + // you should typically add one to the offset of the last consumed message. + // // Note: calling MarkOffset does not necessarily commit the offset to the backend // store immediately for efficiency reasons, and it may never be committed if // your application crashes. This means that you may end up processing the same // message twice, and your processing should ideally be idempotent. MarkOffset(offset int64, metadata string) + // ResetOffset resets to the provided offset, alongside a metadata string that + // represents the state of the partition consumer at that point in time. Reset + // acts as a counterpart to MarkOffset, the difference being that it allows to + // reset an offset to an earlier or smaller value, where MarkOffset only + // allows incrementing the offset. cf MarkOffset for more details. + ResetOffset(offset int64, metadata string) + // Errors returns a read channel of errors that occur during offset management, if // enabled. By default, errors are logged and not returned over this channel. 
If // you want to implement any custom error handling, set your config's @@ -176,7 +187,7 @@ type partitionOffsetManager struct { offset int64 metadata string dirty bool - clean chan none + clean sync.Cond broker *brokerOffsetManager errors chan *ConsumerError @@ -189,11 +200,11 @@ func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32 parent: om, topic: topic, partition: partition, - clean: make(chan none), errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), rebalance: make(chan none, 1), dying: make(chan none), } + pom.clean.L = &pom.lock if err := pom.selectBroker(); err != nil { return nil, err @@ -325,17 +336,24 @@ func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { } } +func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset <= pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { pom.lock.Lock() defer pom.lock.Unlock() if pom.offset == offset && pom.metadata == metadata { pom.dirty = false - - select { - case pom.clean <- none{}: - default: - } + pom.clean.Signal() } } @@ -344,7 +362,7 @@ func (pom *partitionOffsetManager) NextOffset() (int64, string) { defer pom.lock.Unlock() if pom.offset >= 0 { - return pom.offset + 1, pom.metadata + return pom.offset, pom.metadata } return pom.parent.conf.Consumer.Offsets.Initial, "" @@ -353,11 +371,10 @@ func (pom *partitionOffsetManager) NextOffset() (int64, string) { func (pom *partitionOffsetManager) AsyncClose() { go func() { pom.lock.Lock() - dirty := pom.dirty - pom.lock.Unlock() + defer pom.lock.Unlock() - if dirty { - <-pom.clean + for pom.dirty { + pom.clean.Wait() } close(pom.dying) @@ -462,7 +479,7 @@ func (bom *brokerOffsetManager) flushToBroker() { case ErrNoError: block := request.blocks[s.topic][s.partition] s.updateCommitted(block.offset, block.metadata) - case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, + case ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: // not a critical error, we just need to redispatch delete(bom.subscriptions, s) @@ -473,6 +490,12 @@ func (bom *brokerOffsetManager) flushToBroker() { case ErrOffsetsLoadInProgress: // nothing wrong but we didn't commit, we'll get it next time round break + case ErrUnknownTopicOrPartition: + // let the user know *and* try redispatching - if topic-auto-create is + // enabled, redispatching should trigger a metadata request and create the + // topic; if not then re-dispatching won't help, but we've let the user + // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + fallthrough default: // dunno, tell the user and try redispatching s.handleError(err) @@ -484,7 +507,9 @@ func (bom *brokerOffsetManager) flushToBroker() { func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { var r *OffsetCommitRequest + var perPartitionTimestamp int64 if bom.parent.conf.Consumer.Offsets.Retention == 0 { + perPartitionTimestamp = ReceiveTime r = &OffsetCommitRequest{ Version: 1, ConsumerGroup: bom.parent.group, @@ -503,7 +528,7 @@ func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { for s := range bom.subscriptions { s.lock.Lock() if s.dirty { - r.AddBlock(s.topic, s.partition, s.offset, ReceiveTime, s.metadata) + r.AddBlock(s.topic, s.partition, s.offset, 
perPartitionTimestamp, s.metadata) } s.lock.Unlock() } diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go index 842d5c0..6c26960 100644 --- a/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -2,27 +2,33 @@ package sarama type offsetRequestBlock struct { time int64 - maxOffsets int32 + maxOffsets int32 // Only used in version 0 } -func (r *offsetRequestBlock) encode(pe packetEncoder) error { - pe.putInt64(int64(r.time)) - pe.putInt32(r.maxOffsets) +func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(int64(b.time)) + if version == 0 { + pe.putInt32(b.maxOffsets) + } + return nil } -func (r *offsetRequestBlock) decode(pd packetDecoder) (err error) { - if r.time, err = pd.getInt64(); err != nil { +func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.time, err = pd.getInt64(); err != nil { return err } - if r.maxOffsets, err = pd.getInt32(); err != nil { - return err + if version == 0 { + if b.maxOffsets, err = pd.getInt32(); err != nil { + return err + } } return nil } type OffsetRequest struct { - blocks map[string]map[int32]*offsetRequestBlock + Version int16 + blocks map[string]map[int32]*offsetRequestBlock } func (r *OffsetRequest) encode(pe packetEncoder) error { @@ -42,7 +48,7 @@ func (r *OffsetRequest) encode(pe packetEncoder) error { } for partition, block := range partitions { pe.putInt32(partition) - if err = block.encode(pe); err != nil { + if err = block.encode(pe, r.Version); err != nil { return err } } @@ -50,7 +56,9 @@ func (r *OffsetRequest) encode(pe packetEncoder) error { return nil } -func (r *OffsetRequest) decode(pd packetDecoder) error { +func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + // Ignore replica ID if _, err := pd.getInt32(); err != nil { return err @@ -79,7 +87,7 @@ func (r *OffsetRequest) decode(pd packetDecoder) error { return err } block := &offsetRequestBlock{} - if err := block.decode(pd); err != nil { + if err := block.decode(pd, version); err != nil { return err } r.blocks[topic][partition] = block @@ -93,7 +101,16 @@ func (r *OffsetRequest) key() int16 { } func (r *OffsetRequest) version() int16 { - return 0 + return r.Version +} + +func (r *OffsetRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return minVersion + } } func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { @@ -107,7 +124,9 @@ func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, ma tmp := new(offsetRequestBlock) tmp.time = time - tmp.maxOffsets = maxOffsets + if r.Version == 0 { + tmp.maxOffsets = maxOffsets + } r.blocks[topic][partitionID] = tmp } diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go index 07d71ca..9a9cfe9 100644 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -1,33 +1,60 @@ package sarama type OffsetResponseBlock struct { - Err KError - Offsets []int64 + Err KError + Offsets []int64 // Version 0 + Offset int64 // Version 1 + Timestamp int64 // Version 1 } -func (r *OffsetResponseBlock) decode(pd packetDecoder) (err error) { +func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err } - r.Err 
= KError(tmp) + b.Err = KError(tmp) - r.Offsets, err = pd.getInt64Array() + if version == 0 { + b.Offsets, err = pd.getInt64Array() - return err + return err + } + + b.Timestamp, err = pd.getInt64() + if err != nil { + return err + } + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = []int64{b.Offset} + + return nil } -func (r *OffsetResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(r.Err)) +func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + + if version == 0 { + return pe.putInt64Array(b.Offsets) + } + + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) - return pe.putInt64Array(r.Offsets) + return nil } type OffsetResponse struct { - Blocks map[string]map[int32]*OffsetResponseBlock + Version int16 + Blocks map[string]map[int32]*OffsetResponseBlock } -func (r *OffsetResponse) decode(pd packetDecoder) (err error) { +func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { numTopics, err := pd.getArrayLength() if err != nil { return err @@ -54,7 +81,7 @@ func (r *OffsetResponse) decode(pd packetDecoder) (err error) { } block := new(OffsetResponseBlock) - err = block.decode(pd) + err = block.decode(pd, version) if err != nil { return err } @@ -106,7 +133,7 @@ func (r *OffsetResponse) encode(pe packetEncoder) (err error) { } for partition, block := range partitions { pe.putInt32(partition) - if err = block.encode(pe); err != nil { + if err = block.encode(pe, r.version()); err != nil { return err } } @@ -115,6 +142,23 @@ func (r *OffsetResponse) encode(pe packetEncoder) (err error) { return nil } +func (r *OffsetResponse) key() int16 { + return 2 +} + +func (r *OffsetResponse) version() int16 { + return r.Version +} + +func (r *OffsetResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return minVersion + } +} + // testing API func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { @@ -126,5 +170,5 @@ func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset byTopic = make(map[int32]*OffsetResponseBlock) r.Blocks[topic] = byTopic } - byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}} + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} } diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go index 28670c0..74805cc 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -9,11 +9,16 @@ type packetDecoder interface { getInt16() (int16, error) getInt32() (int32, error) getInt64() (int64, error) + getVarint() (int64, error) getArrayLength() (int, error) + getBool() (bool, error) // Collections getBytes() ([]byte, error) + getVarintBytes() ([]byte, error) + getRawBytes(length int) ([]byte, error) getString() (string, error) + getNullableString() (*string, error) getInt32Array() ([]int32, error) getInt64Array() ([]int64, error) getStringArray() ([]string, error) @@ -21,6 +26,7 @@ type packetDecoder interface { // Subsets remaining() int getSubset(length int) (packetDecoder, error) + peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset // Stacks, see PushDecoder push(in pushDecoder) error @@ -43,3 +49,12 @@ type pushDecoder interface { // of data from 
the saved offset, and verify it based on the data between the saved offset and curOffset. check(curOffset int, buf []byte) error } + +// dynamicPushDecoder extends the interface of pushDecoder for uses cases where the length of the +// fields itself is unknown until its value was decoded (for instance varint encoded length +// fields). +// During push, dynamicPushDecoder.decode() method will be called instead of reserveLength() +type dynamicPushDecoder interface { + pushDecoder + decoder +} diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go index 0df6e24..67b8dae 100644 --- a/vendor/github.com/Shopify/sarama/packet_encoder.go +++ b/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -1,5 +1,7 @@ package sarama +import "github.com/rcrowley/go-metrics" + // PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. // Types implementing Encoder only need to worry about calling methods like PutString, // not about how a string is represented in Kafka. @@ -9,19 +11,29 @@ type packetEncoder interface { putInt16(in int16) putInt32(in int32) putInt64(in int64) + putVarint(in int64) putArrayLength(in int) error + putBool(in bool) // Collections putBytes(in []byte) error + putVarintBytes(in []byte) error putRawBytes(in []byte) error putString(in string) error + putNullableString(in *string) error putStringArray(in []string) error putInt32Array(in []int32) error putInt64Array(in []int64) error + // Provide the current offset to record the batch size metric + offset() int + // Stacks, see PushEncoder push(in pushEncoder) pop() error + + // To record metrics when provided + metricRegistry() metrics.Registry } // PushEncoder is the interface for encoding fields like CRCs and lengths where the value @@ -40,3 +52,14 @@ type pushEncoder interface { // of data to the saved offset, based on the data between the saved offset and curOffset. run(curOffset int, buf []byte) error } + +// dynamicPushEncoder extends the interface of pushEncoder for uses cases where the length of the +// fields itself is unknown until its value was computed (for instance varint encoded length +// fields). +type dynamicPushEncoder interface { + pushEncoder + + // Called during pop() to adjust the length of the field. + // It should return the difference in bytes between the last computed length and current length. + adjustLength(currOffset int) int +} diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go index 8c2bca4..9729327 100644 --- a/vendor/github.com/Shopify/sarama/partitioner.go +++ b/vendor/github.com/Shopify/sarama/partitioner.go @@ -87,9 +87,21 @@ type hashPartitioner struct { hasher hash.Hash32 } -// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil, or fails to -// encode, then a random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key -// is used, modulus the number of partitions. This ensures that messages with the same key always end up on the +// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. +// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that +// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance. 
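The NewCustomHashPartitioner constructor documented above takes a hash.Hash32 factory so that each partition dispatcher gets its own hasher instance. A minimal usage sketch, assuming the stdlib FNV-1a factory purely for illustration; any func() hash.Hash32 (for example a murmur2 implementation for Java-client compatibility) would fit the same slot:

package main

import (
	"hash/fnv"

	"github.com/Shopify/sarama"
)

func newProducerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	// fnv.New32a satisfies func() hash.Hash32; swapping in a different factory
	// changes only how keys are hashed, not how the partition is derived from the hash.
	cfg.Producer.Partitioner = sarama.NewCustomHashPartitioner(fnv.New32a)
	return cfg
}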
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = hasher() + return p + } +} + +// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a +// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, +// modulus the number of partitions. This ensures that messages with the same key always end up on the // same partition. func NewHashPartitioner(topic string) Partitioner { p := new(hashPartitioner) @@ -111,11 +123,11 @@ func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int3 if err != nil { return -1, err } - hash := int32(p.hasher.Sum32()) - if hash < 0 { - hash = -hash + partition := int32(p.hasher.Sum32()) % numPartitions + if partition < 0 { + partition = -partition } - return hash % numPartitions, nil + return partition, nil } func (p *hashPartitioner) RequiresConsistency() bool { diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go index 8c6ba85..b633cd1 100644 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -1,11 +1,15 @@ package sarama import ( + "encoding/binary" "fmt" "math" + + "github.com/rcrowley/go-metrics" ) type prepEncoder struct { + stack []pushEncoder length int } @@ -27,6 +31,11 @@ func (pe *prepEncoder) putInt64(in int64) { pe.length += 8 } +func (pe *prepEncoder) putVarint(in int64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutVarint(buf[:], in) +} + func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} @@ -35,6 +44,10 @@ func (pe *prepEncoder) putArrayLength(in int) error { return nil } +func (pe *prepEncoder) putBool(in bool) { + pe.length++ +} + // arrays func (pe *prepEncoder) putBytes(in []byte) error { @@ -42,11 +55,16 @@ func (pe *prepEncoder) putBytes(in []byte) error { if in == nil { return nil } - if len(in) > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putVarintBytes(in []byte) error { + if in == nil { + pe.putVarint(-1) + return nil } - pe.length += len(in) - return nil + pe.putVarint(int64(len(in))) + return pe.putRawBytes(in) } func (pe *prepEncoder) putRawBytes(in []byte) error { @@ -57,6 +75,14 @@ func (pe *prepEncoder) putRawBytes(in []byte) error { return nil } +func (pe *prepEncoder) putNullableString(in *string) error { + if in == nil { + pe.length += 2 + return nil + } + return pe.putString(*in) +} + func (pe *prepEncoder) putString(in string) error { pe.length += 2 if len(in) > math.MaxInt16 { @@ -99,12 +125,29 @@ func (pe *prepEncoder) putInt64Array(in []int64) error { return nil } +func (pe *prepEncoder) offset() int { + return pe.length +} + // stackable func (pe *prepEncoder) push(in pushEncoder) { + in.saveOffset(pe.length) pe.length += in.reserveLength() + pe.stack = append(pe.stack, in) } func (pe *prepEncoder) pop() error { + in := pe.stack[len(pe.stack)-1] + pe.stack = pe.stack[:len(pe.stack)-1] + if dpe, ok := in.(dynamicPushEncoder); ok { + pe.length += dpe.adjustLength(pe.length) + } + + return nil +} + +// we do not record metrics during the prep encoder pass +func (pe *prepEncoder) metricRegistry() metrics.Registry { return nil 
} diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go index 473513c..0ec4d8d 100644 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -1,5 +1,7 @@ package sarama +import "github.com/rcrowley/go-metrics" + // RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements // it must see before responding. Any of the constants defined here are valid. On broker versions // prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many @@ -12,24 +14,78 @@ const ( NoResponse RequiredAcks = 0 // WaitForLocal waits for only the local commit to succeed before responding. WaitForLocal RequiredAcks = 1 - // WaitForAll waits for all replicas to commit before responding. + // WaitForAll waits for all in-sync replicas to commit before responding. + // The minimum number of in-sync replicas is configured on the broker via + // the `min.insync.replicas` configuration key. WaitForAll RequiredAcks = -1 ) type ProduceRequest struct { - RequiredAcks RequiredAcks - Timeout int32 - msgSets map[string]map[int32]*MessageSet + TransactionalID *string + RequiredAcks RequiredAcks + Timeout int32 + Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 + records map[string]map[int32]Records +} + +func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + var topicRecordCount int64 + for _, messageBlock := range msgSet.Messages { + // Is this a fake "message" wrapping real messages? + if messageBlock.Msg.Set != nil { + topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) + } else { + // A single uncompressed message + topicRecordCount++ + } + // Better be safe than sorry when computing the compression ratio + if messageBlock.Msg.compressedSize != 0 { + compressionRatio := float64(len(messageBlock.Msg.Value)) / + float64(messageBlock.Msg.compressedSize) + // Histogram do not support decimal values, let's multiple it by 100 for better precision + intCompressionRatio := int64(100 * compressionRatio) + compressionRatioMetric.Update(intCompressionRatio) + topicCompressionRatioMetric.Update(intCompressionRatio) + } + } + return topicRecordCount +} + +func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + if recordBatch.compressedRecords != nil { + compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) + compressionRatioMetric.Update(compressionRatio) + topicCompressionRatioMetric.Update(compressionRatio) + } + + return int64(len(recordBatch.Records)) } -func (p *ProduceRequest) encode(pe packetEncoder) error { - pe.putInt16(int16(p.RequiredAcks)) - pe.putInt32(p.Timeout) - err := pe.putArrayLength(len(p.msgSets)) +func (r *ProduceRequest) encode(pe packetEncoder) error { + if r.Version >= 3 { + if err := pe.putNullableString(r.TransactionalID); err != nil { + return err + } + } + pe.putInt16(int16(r.RequiredAcks)) + pe.putInt32(r.Timeout) + metricRegistry := pe.metricRegistry() + var batchSizeMetric metrics.Histogram + var compressionRatioMetric metrics.Histogram + if metricRegistry != nil { + batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry) + compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry) + } + 
totalRecordCount := int64(0) + + err := pe.putArrayLength(len(r.records)) if err != nil { return err } - for topic, partitions := range p.msgSets { + + for topic, partitions := range r.records { err = pe.putString(topic) if err != nil { return err @@ -38,10 +94,16 @@ func (p *ProduceRequest) encode(pe packetEncoder) error { if err != nil { return err } - for id, msgSet := range partitions { + topicRecordCount := int64(0) + var topicCompressionRatioMetric metrics.Histogram + if metricRegistry != nil { + topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry) + } + for id, records := range partitions { + startOffset := pe.offset() pe.putInt32(id) pe.push(&lengthField{}) - err = msgSet.encode(pe) + err = records.encode(pe) if err != nil { return err } @@ -49,18 +111,47 @@ func (p *ProduceRequest) encode(pe packetEncoder) error { if err != nil { return err } + if metricRegistry != nil { + if r.Version >= 3 { + topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric) + } else { + topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric) + } + batchSize := int64(pe.offset() - startOffset) + batchSizeMetric.Update(batchSize) + getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize) + } } + if topicRecordCount > 0 { + getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount) + getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount) + totalRecordCount += topicRecordCount + } + } + if totalRecordCount > 0 { + metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount) + getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount) } + return nil } -func (p *ProduceRequest) decode(pd packetDecoder) error { +func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + if version >= 3 { + id, err := pd.getNullableString() + if err != nil { + return err + } + r.TransactionalID = id + } requiredAcks, err := pd.getInt16() if err != nil { return err } - p.RequiredAcks = RequiredAcks(requiredAcks) - if p.Timeout, err = pd.getInt32(); err != nil { + r.RequiredAcks = RequiredAcks(requiredAcks) + if r.Timeout, err = pd.getInt32(); err != nil { return err } topicCount, err := pd.getArrayLength() @@ -70,7 +161,8 @@ func (p *ProduceRequest) decode(pd packetDecoder) error { if topicCount == 0 { return nil } - p.msgSets = make(map[string]map[int32]*MessageSet) + + r.records = make(map[string]map[int32]Records) for i := 0; i < topicCount; i++ { topic, err := pd.getString() if err != nil { @@ -80,66 +172,81 @@ func (p *ProduceRequest) decode(pd packetDecoder) error { if err != nil { return err } - p.msgSets[topic] = make(map[int32]*MessageSet) + r.records[topic] = make(map[int32]Records) + for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } - messageSetSize, err := pd.getInt32() + size, err := pd.getInt32() if err != nil { return err } - msgSetDecoder, err := pd.getSubset(int(messageSetSize)) + recordsDecoder, err := pd.getSubset(int(size)) if err != nil { return err } - msgSet := &MessageSet{} - err = msgSet.decode(msgSetDecoder) - if err != nil { + var records Records + if err := records.decode(recordsDecoder); err != nil { return err } - p.msgSets[topic][partition] = msgSet + r.records[topic][partition] = records } } + return nil 
} -func (p *ProduceRequest) key() int16 { +func (r *ProduceRequest) key() int16 { return 0 } -func (p *ProduceRequest) version() int16 { - return 0 +func (r *ProduceRequest) version() int16 { + return r.Version } -func (p *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { - if p.msgSets == nil { - p.msgSets = make(map[string]map[int32]*MessageSet) +func (r *ProduceRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_11_0_0 + default: + return minVersion } +} - if p.msgSets[topic] == nil { - p.msgSets[topic] = make(map[int32]*MessageSet) +func (r *ProduceRequest) ensureRecords(topic string, partition int32) { + if r.records == nil { + r.records = make(map[string]map[int32]Records) } - set := p.msgSets[topic][partition] + if r.records[topic] == nil { + r.records[topic] = make(map[int32]Records) + } +} + +func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + r.ensureRecords(topic, partition) + set := r.records[topic][partition].msgSet if set == nil { set = new(MessageSet) - p.msgSets[topic][partition] = set + r.records[topic][partition] = newLegacyRecords(set) } set.addMessage(msg) } -func (p *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { - if p.msgSets == nil { - p.msgSets = make(map[string]map[int32]*MessageSet) - } - - if p.msgSets[topic] == nil { - p.msgSets[topic] = make(map[int32]*MessageSet) - } +func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newLegacyRecords(set) +} - p.msgSets[topic][partition] = set +func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newDefaultRecords(batch) } diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go index 1f49a85..043c40f 100644 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -1,36 +1,72 @@ package sarama +import ( + "fmt" + "time" +) + type ProduceResponseBlock struct { Err KError Offset int64 + // only provided if Version >= 2 and the broker is configured with `LogAppendTime` + Timestamp time.Time } -func (pr *ProduceResponseBlock) decode(pd packetDecoder) (err error) { +func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err } - pr.Err = KError(tmp) + b.Err = KError(tmp) - pr.Offset, err = pd.getInt64() + b.Offset, err = pd.getInt64() if err != nil { return err } + if version >= 2 { + if millis, err := pd.getInt64(); err != nil { + return err + } else if millis != -1 { + b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + } + + return nil +} + +func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + pe.putInt64(b.Offset) + + if version >= 2 { + timestamp := int64(-1) + if !b.Timestamp.Before(time.Unix(0, 0)) { + timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) + } else if !b.Timestamp.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} + } + pe.putInt64(timestamp) + } + return nil } type ProduceResponse struct { - Blocks map[string]map[int32]*ProduceResponseBlock + Blocks map[string]map[int32]*ProduceResponseBlock + 
Version int16 + ThrottleTime time.Duration // only provided if Version >= 1 } -func (pr *ProduceResponse) decode(pd packetDecoder) (err error) { +func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + numTopics, err := pd.getArrayLength() if err != nil { return err } - pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) for i := 0; i < numTopics; i++ { name, err := pd.getString() if err != nil { @@ -42,7 +78,7 @@ func (pr *ProduceResponse) decode(pd packetDecoder) (err error) { return err } - pr.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) + r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) for j := 0; j < numBlocks; j++ { id, err := pd.getInt32() @@ -51,23 +87,32 @@ func (pr *ProduceResponse) decode(pd packetDecoder) (err error) { } block := new(ProduceResponseBlock) - err = block.decode(pd) + err = block.decode(pd, version) if err != nil { return err } - pr.Blocks[name][id] = block + r.Blocks[name][id] = block + } + } + + if r.Version >= 1 { + millis, err := pd.getInt32() + if err != nil { + return err } + + r.ThrottleTime = time.Duration(millis) * time.Millisecond } return nil } -func (pr *ProduceResponse) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(pr.Blocks)) +func (r *ProduceResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Blocks)) if err != nil { return err } - for topic, partitions := range pr.Blocks { + for topic, partitions := range r.Blocks { err = pe.putString(topic) if err != nil { return err @@ -78,35 +123,61 @@ func (pr *ProduceResponse) encode(pe packetEncoder) error { } for id, prb := range partitions { pe.putInt32(id) - pe.putInt16(int16(prb.Err)) - pe.putInt64(prb.Offset) + err = prb.encode(pe, r.Version) + if err != nil { + return err + } } } + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } return nil } -func (pr *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { - if pr.Blocks == nil { +func (r *ProduceResponse) key() int16 { + return 0 +} + +func (r *ProduceResponse) version() int16 { + return r.Version +} + +func (r *ProduceResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_11_0_0 + default: + return minVersion + } +} + +func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { + if r.Blocks == nil { return nil } - if pr.Blocks[topic] == nil { + if r.Blocks[topic] == nil { return nil } - return pr.Blocks[topic][partition] + return r.Blocks[topic][partition] } // Testing API -func (pr *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { - if pr.Blocks == nil { - pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock) +func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) } - byTopic, ok := pr.Blocks[topic] + byTopic, ok := r.Blocks[topic] if !ok { byTopic = make(map[int32]*ProduceResponseBlock) - pr.Blocks[topic] = byTopic + r.Blocks[topic] = byTopic } byTopic[partition] = &ProduceResponseBlock{Err: err} } diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go index 9fe5f79..b9eac95 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ 
b/vendor/github.com/Shopify/sarama/produce_set.go @@ -1,11 +1,14 @@ package sarama -import "time" +import ( + "encoding/binary" + "time" +) type partitionSet struct { - msgs []*ProducerMessage - setToSend *MessageSet - bufferBytes int + msgs []*ProducerMessage + recordsToSend Records + bufferBytes int } type produceSet struct { @@ -39,22 +42,64 @@ func (ps *produceSet) add(msg *ProducerMessage) error { } } + timestamp := msg.Timestamp + if msg.Timestamp.IsZero() { + timestamp = time.Now() + } + partitions := ps.msgs[msg.Topic] if partitions == nil { partitions = make(map[int32]*partitionSet) ps.msgs[msg.Topic] = partitions } + var size int + set := partitions[msg.Partition] if set == nil { - set = &partitionSet{setToSend: new(MessageSet)} + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + batch := &RecordBatch{ + FirstTimestamp: timestamp, + Version: 2, + ProducerID: -1, /* No producer id */ + Codec: ps.parent.conf.Producer.Compression, + } + set = &partitionSet{recordsToSend: newDefaultRecords(batch)} + size = recordBatchOverhead + } else { + set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} + } partitions[msg.Partition] = set } set.msgs = append(set.msgs, msg) - set.setToSend.addMessage(&Message{Codec: CompressionNone, Key: key, Value: val}) + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + // We are being conservative here to avoid having to prep encode the record + size += maximumRecordOverhead + rec := &Record{ + Key: key, + Value: val, + TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp), + } + size += len(key) + len(val) + if len(msg.Headers) > 0 { + rec.Headers = make([]*RecordHeader, len(msg.Headers)) + for i := range msg.Headers { + rec.Headers[i] = &msg.Headers[i] + size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 + } + } + set.recordsToSend.recordBatch.addRecord(rec) + } else { + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + msgToSend.Timestamp = timestamp + msgToSend.Version = 1 + } + set.recordsToSend.msgSet.addMessage(msgToSend) + size = producerMessageOverhead + len(key) + len(val) + } - size := producerMessageOverhead + len(key) + len(val) set.bufferBytes += size ps.bufferBytes += size ps.bufferCount++ @@ -67,26 +112,57 @@ func (ps *produceSet) buildRequest() *ProduceRequest { RequiredAcks: ps.parent.conf.Producer.RequiredAcks, Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 2 + } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + req.Version = 3 + } for topic, partitionSet := range ps.msgs { for partition, set := range partitionSet { + if req.Version >= 3 { + for i, record := range set.recordsToSend.recordBatch.Records { + record.OffsetDelta = int64(i) + } + + req.AddBatch(topic, partition, set.recordsToSend.recordBatch) + continue + } if ps.parent.conf.Producer.Compression == CompressionNone { - req.AddSet(topic, partition, set.setToSend) + req.AddSet(topic, partition, set.recordsToSend.msgSet) } else { // When compression is enabled, the entire set for each partition is compressed // and sent as the payload of a single fake "message" with the appropriate codec // set and no key. When the server sees a message with a compression codec, it // decompresses the payload and treats the result as its message set. 
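This branch keeps the legacy wrapping described in the comment above; against 0.11+ brokers the produce set is built as a v2 RecordBatch instead, with the codec carried in the batch attributes rather than in a wrapper Message. A minimal configuration sketch selecting that path, assuming a 0.11+ cluster and snappy compression purely for illustration:

package main

import "github.com/Shopify/sarama"

func compressedProducerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	// Version gates the request format chosen in buildRequest: >= V0_11_0_0 sends
	// v3 ProduceRequests with RecordBatches, >= V0_10_0_0 adds message timestamps
	// and relative offsets inside compressed message sets.
	cfg.Version = sarama.V0_11_0_0
	cfg.Producer.Compression = sarama.CompressionSnappy
	return cfg
}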
- payload, err := encode(set.setToSend) + + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + // If our version is 0.10 or later, assign relative offsets + // to the inner messages. This lets the broker avoid + // recompressing the message set. + // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets + // for details on relative offsets.) + for i, msg := range set.recordsToSend.msgSet.Messages { + msg.Offset = int64(i) + } + } + payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry) if err != nil { Logger.Println(err) // if this happens, it's basically our fault. panic(err) } - req.AddMessage(topic, partition, &Message{ + compMsg := &Message{ Codec: ps.parent.conf.Producer.Compression, Key: nil, Value: payload, - }) + Set: set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + compMsg.Version = 1 + compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp + } + req.AddMessage(topic, partition, compMsg) } } } @@ -117,14 +193,19 @@ func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMe } func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + version := 1 + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + switch { // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. - case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)): + case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): return true // Would we overflow the size-limit of a compressed message-batch for this partition? case ps.parent.conf.Producer.Compression != CompressionNone && ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && - ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes: + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: return true // Would we overflow simply in number of messages? 
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go index e3ea331..23045e7 100644 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -5,6 +5,14 @@ import ( "math" ) +var errInvalidArrayLength = PacketDecodingError{"invalid array length"} +var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} +var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"} +var errInvalidStringLength = PacketDecodingError{"invalid string length"} +var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} +var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errInvalidBool = PacketDecodingError{"invalid bool"} + type realDecoder struct { raw []byte off int @@ -53,69 +61,96 @@ func (rd *realDecoder) getInt64() (int64, error) { return tmp, nil } +func (rd *realDecoder) getVarint() (int64, error) { + tmp, n := binary.Varint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + if n < 0 { + rd.off -= n + return -1, errVarintOverflow + } + rd.off += n + return tmp, nil +} + func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) return -1, ErrInsufficientData } - tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) rd.off += 4 if tmp > rd.remaining() { rd.off = len(rd.raw) return -1, ErrInsufficientData } else if tmp > 2*math.MaxUint16 { - return -1, PacketDecodingError{"invalid array length"} + return -1, errInvalidArrayLength } return tmp, nil } +func (rd *realDecoder) getBool() (bool, error) { + b, err := rd.getInt8() + if err != nil || b == 0 { + return false, err + } + if b != 1 { + return false, errInvalidBool + } + return true, nil +} + // collections func (rd *realDecoder) getBytes() ([]byte, error) { tmp, err := rd.getInt32() - if err != nil { return nil, err } + if tmp == -1 { + return nil, nil + } - n := int(tmp) + return rd.getRawBytes(int(tmp)) +} - switch { - case n < -1: - return nil, PacketDecodingError{"invalid byteslice length"} - case n == -1: +func (rd *realDecoder) getVarintBytes() ([]byte, error) { + tmp, err := rd.getVarint() + if err != nil { + return nil, err + } + if tmp == -1 { return nil, nil - case n == 0: - return make([]byte, 0), nil - case n > rd.remaining(): - rd.off = len(rd.raw) - return nil, ErrInsufficientData } - tmpStr := rd.raw[rd.off : rd.off+n] - rd.off += n - return tmpStr, nil + return rd.getRawBytes(int(tmp)) } -func (rd *realDecoder) getString() (string, error) { - tmp, err := rd.getInt16() - +func (rd *realDecoder) getStringLength() (int, error) { + length, err := rd.getInt16() if err != nil { - return "", err + return 0, err } - n := int(tmp) + n := int(length) switch { case n < -1: - return "", PacketDecodingError{"invalid string length"} - case n == -1: - return "", nil - case n == 0: - return "", nil + return 0, errInvalidStringLength case n > rd.remaining(): rd.off = len(rd.raw) - return "", ErrInsufficientData + return 0, ErrInsufficientData + } + + return n, nil +} + +func (rd *realDecoder) getString() (string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return "", err } tmpStr := string(rd.raw[rd.off : rd.off+n]) @@ -123,6 +158,17 @@ func (rd *realDecoder) getString() (string, 
error) { return tmpStr, nil } +func (rd *realDecoder) getNullableString() (*string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return &tmpStr, err +} + func (rd *realDecoder) getInt32Array() ([]int32, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -141,7 +187,7 @@ func (rd *realDecoder) getInt32Array() ([]int32, error) { } if n < 0 { - return nil, PacketDecodingError{"invalid array length"} + return nil, errInvalidArrayLength } ret := make([]int32, n) @@ -170,7 +216,7 @@ func (rd *realDecoder) getInt64Array() ([]int64, error) { } if n < 0 { - return nil, PacketDecodingError{"invalid array length"} + return nil, errInvalidArrayLength } ret := make([]int64, n) @@ -194,16 +240,17 @@ func (rd *realDecoder) getStringArray() ([]string, error) { } if n < 0 { - return nil, PacketDecodingError{"invalid array length"} + return nil, errInvalidArrayLength } ret := make([]string, n) for i := range ret { - if str, err := rd.getString(); err != nil { + str, err := rd.getString() + if err != nil { return nil, err - } else { - ret[i] = str } + + ret[i] = str } return ret, nil } @@ -215,8 +262,16 @@ func (rd *realDecoder) remaining() int { } func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + buf, err := rd.getRawBytes(length) + if err != nil { + return nil, err + } + return &realDecoder{raw: buf}, nil +} + +func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { if length < 0 { - return nil, PacketDecodingError{"invalid subset size"} + return nil, errInvalidByteSliceLength } else if length > rd.remaining() { rd.off = len(rd.raw) return nil, ErrInsufficientData @@ -224,7 +279,15 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { start := rd.off rd.off += length - return &realDecoder{raw: rd.raw[start:rd.off]}, nil + return rd.raw[start:rd.off], nil +} + +func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { + if rd.remaining() < offset+length { + return nil, ErrInsufficientData + } + off := rd.off + offset + return &realDecoder{raw: rd.raw[off : off+length]}, nil } // stacks @@ -232,10 +295,17 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { func (rd *realDecoder) push(in pushDecoder) error { in.saveOffset(rd.off) - reserve := in.reserveLength() - if rd.remaining() < reserve { - rd.off = len(rd.raw) - return ErrInsufficientData + var reserve int + if dpd, ok := in.(dynamicPushDecoder); ok { + if err := dpd.decode(rd); err != nil { + return err + } + } else { + reserve = in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } } rd.stack = append(rd.stack, in) diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go index 076fdd0..3c75387 100644 --- a/vendor/github.com/Shopify/sarama/real_encoder.go +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -1,11 +1,16 @@ package sarama -import "encoding/binary" +import ( + "encoding/binary" + + "github.com/rcrowley/go-metrics" +) type realEncoder struct { - raw []byte - off int - stack []pushEncoder + raw []byte + off int + stack []pushEncoder + registry metrics.Registry } // primitives @@ -30,11 +35,23 @@ func (re *realEncoder) putInt64(in int64) { re.off += 8 } +func (re *realEncoder) putVarint(in int64) { + re.off += binary.PutVarint(re.raw[re.off:], in) +} + func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) 
return nil } +func (re *realEncoder) putBool(in bool) { + if in { + re.putInt8(1) + return + } + re.putInt8(0) +} + // collection func (re *realEncoder) putRawBytes(in []byte) error { @@ -49,9 +66,16 @@ func (re *realEncoder) putBytes(in []byte) error { return nil } re.putInt32(int32(len(in))) - copy(re.raw[re.off:], in) - re.off += len(in) - return nil + return re.putRawBytes(in) +} + +func (re *realEncoder) putVarintBytes(in []byte) error { + if in == nil { + re.putVarint(-1) + return nil + } + re.putVarint(int64(len(in))) + return re.putRawBytes(in) } func (re *realEncoder) putString(in string) error { @@ -61,6 +85,14 @@ func (re *realEncoder) putString(in string) error { return nil } +func (re *realEncoder) putNullableString(in *string) error { + if in == nil { + re.putInt16(-1) + return nil + } + return re.putString(*in) +} + func (re *realEncoder) putStringArray(in []string) error { err := re.putArrayLength(len(in)) if err != nil { @@ -98,6 +130,10 @@ func (re *realEncoder) putInt64Array(in []int64) error { return nil } +func (re *realEncoder) offset() int { + return re.off +} + // stacks func (re *realEncoder) push(in pushEncoder) { @@ -113,3 +149,8 @@ func (re *realEncoder) pop() error { return in.run(re.off, re.raw) } + +// we do record metrics during the real encoder pass +func (re *realEncoder) metricRegistry() metrics.Registry { + return re.registry +} diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go new file mode 100644 index 0000000..cded308 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record.go @@ -0,0 +1,113 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +const ( + controlMask = 0x20 + maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 +) + +type RecordHeader struct { + Key []byte + Value []byte +} + +func (h *RecordHeader) encode(pe packetEncoder) error { + if err := pe.putVarintBytes(h.Key); err != nil { + return err + } + return pe.putVarintBytes(h.Value) +} + +func (h *RecordHeader) decode(pd packetDecoder) (err error) { + if h.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if h.Value, err = pd.getVarintBytes(); err != nil { + return err + } + return nil +} + +type Record struct { + Attributes int8 + TimestampDelta time.Duration + OffsetDelta int64 + Key []byte + Value []byte + Headers []*RecordHeader + + length varintLengthField +} + +func (r *Record) encode(pe packetEncoder) error { + pe.push(&r.length) + pe.putInt8(r.Attributes) + pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) + pe.putVarint(r.OffsetDelta) + if err := pe.putVarintBytes(r.Key); err != nil { + return err + } + if err := pe.putVarintBytes(r.Value); err != nil { + return err + } + pe.putVarint(int64(len(r.Headers))) + + for _, h := range r.Headers { + if err := h.encode(pe); err != nil { + return err + } + } + + return pe.pop() +} + +func (r *Record) decode(pd packetDecoder) (err error) { + if err = pd.push(&r.length); err != nil { + return err + } + + if r.Attributes, err = pd.getInt8(); err != nil { + return err + } + + timestamp, err := pd.getVarint() + if err != nil { + return err + } + r.TimestampDelta = time.Duration(timestamp) * time.Millisecond + + if r.OffsetDelta, err = pd.getVarint(); err != nil { + return err + } + + if r.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if r.Value, err = pd.getVarintBytes(); err != nil { + return err + } + + numHeaders, err := pd.getVarint() + if err != nil { + return err + } + + if numHeaders >= 0 { + r.Headers 
= make([]*RecordHeader, numHeaders) + } + for i := int64(0); i < numHeaders; i++ { + hdr := new(RecordHeader) + if err := hdr.decode(pd); err != nil { + return err + } + r.Headers[i] = hdr + } + + return pd.pop() +} diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go new file mode 100644 index 0000000..a8c533b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -0,0 +1,265 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "time" + + "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +const recordBatchOverhead = 49 + +type recordsArray []*Record + +func (e recordsArray) encode(pe packetEncoder) error { + for _, r := range e { + if err := r.encode(pe); err != nil { + return err + } + } + return nil +} + +func (e recordsArray) decode(pd packetDecoder) error { + for i := range e { + rec := &Record{} + if err := rec.decode(pd); err != nil { + return err + } + e[i] = rec + } + return nil +} + +type RecordBatch struct { + FirstOffset int64 + PartitionLeaderEpoch int32 + Version int8 + Codec CompressionCodec + Control bool + LastOffsetDelta int32 + FirstTimestamp time.Time + MaxTimestamp time.Time + ProducerID int64 + ProducerEpoch int16 + FirstSequence int32 + Records []*Record + PartialTrailingRecord bool + + compressedRecords []byte + recordsLen int // uncompressed records size +} + +func (b *RecordBatch) encode(pe packetEncoder) error { + if b.Version != 2 { + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + } + pe.putInt64(b.FirstOffset) + pe.push(&lengthField{}) + pe.putInt32(b.PartitionLeaderEpoch) + pe.putInt8(b.Version) + pe.push(newCRC32Field(crcCastagnoli)) + pe.putInt16(b.computeAttributes()) + pe.putInt32(b.LastOffsetDelta) + + if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil { + return err + } + + if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil { + return err + } + + pe.putInt64(b.ProducerID) + pe.putInt16(b.ProducerEpoch) + pe.putInt32(b.FirstSequence) + + if err := pe.putArrayLength(len(b.Records)); err != nil { + return err + } + + if b.compressedRecords == nil { + if err := b.encodeRecords(pe); err != nil { + return err + } + } + if err := pe.putRawBytes(b.compressedRecords); err != nil { + return err + } + + if err := pe.pop(); err != nil { + return err + } + return pe.pop() +} + +func (b *RecordBatch) decode(pd packetDecoder) (err error) { + if b.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + batchLen, err := pd.getInt32() + if err != nil { + return err + } + + if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + + if b.Version, err = pd.getInt8(); err != nil { + return err + } + + if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil { + return err + } + + attributes, err := pd.getInt16() + if err != nil { + return err + } + b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask) + b.Control = attributes&controlMask == controlMask + + if b.LastOffsetDelta, err = pd.getInt32(); err != nil { + return err + } + + if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil { + return err + } + + if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil { + return err + } + + if b.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if b.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + if b.FirstSequence, err = pd.getInt32(); err != nil { + return err + } + + numRecs, 
err := pd.getArrayLength() + if err != nil { + return err + } + if numRecs >= 0 { + b.Records = make([]*Record, numRecs) + } + + bufSize := int(batchLen) - recordBatchOverhead + recBuffer, err := pd.getRawBytes(bufSize) + if err != nil { + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err + } + + if err = pd.pop(); err != nil { + return err + } + + switch b.Codec { + case CompressionNone: + case CompressionGZIP: + reader, err := gzip.NewReader(bytes.NewReader(recBuffer)) + if err != nil { + return err + } + if recBuffer, err = ioutil.ReadAll(reader); err != nil { + return err + } + case CompressionSnappy: + if recBuffer, err = snappy.Decode(recBuffer); err != nil { + return err + } + case CompressionLZ4: + reader := lz4.NewReader(bytes.NewReader(recBuffer)) + if recBuffer, err = ioutil.ReadAll(reader); err != nil { + return err + } + default: + return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)} + } + + b.recordsLen = len(recBuffer) + err = decode(recBuffer, recordsArray(b.Records)) + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err +} + +func (b *RecordBatch) encodeRecords(pe packetEncoder) error { + var raw []byte + if b.Codec != CompressionNone { + var err error + if raw, err = encode(recordsArray(b.Records), nil); err != nil { + return err + } + b.recordsLen = len(raw) + } + + switch b.Codec { + case CompressionNone: + offset := pe.offset() + if err := recordsArray(b.Records).encode(pe); err != nil { + return err + } + b.recordsLen = pe.offset() - offset + case CompressionGZIP: + var buf bytes.Buffer + writer := gzip.NewWriter(&buf) + if _, err := writer.Write(raw); err != nil { + return err + } + if err := writer.Close(); err != nil { + return err + } + b.compressedRecords = buf.Bytes() + case CompressionSnappy: + b.compressedRecords = snappy.Encode(raw) + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err := writer.Write(raw); err != nil { + return err + } + if err := writer.Close(); err != nil { + return err + } + b.compressedRecords = buf.Bytes() + default: + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + } + + return nil +} + +func (b *RecordBatch) computeAttributes() int16 { + attr := int16(b.Codec) & int16(compressionCodecMask) + if b.Control { + attr |= controlMask + } + return attr +} + +func (b *RecordBatch) addRecord(r *Record) { + b.Records = append(b.Records, r) +} diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go new file mode 100644 index 0000000..54ee7e3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/records.go @@ -0,0 +1,167 @@ +package sarama + +import "fmt" + +const ( + unknownRecords = iota + legacyRecords + defaultRecords + + magicOffset = 16 + magicLength = 1 +) + +// Records implements a union type containing either a RecordBatch or a legacy MessageSet. +type Records struct { + recordsType int + msgSet *MessageSet + recordBatch *RecordBatch +} + +func newLegacyRecords(msgSet *MessageSet) Records { + return Records{recordsType: legacyRecords, msgSet: msgSet} +} + +func newDefaultRecords(batch *RecordBatch) Records { + return Records{recordsType: defaultRecords, recordBatch: batch} +} + +// setTypeFromFields sets type of Records depending on which of msgSet or recordBatch is not nil. 
+// The first return value indicates whether both fields are nil (and the type is not set). +// If both fields are not nil, it returns an error. +func (r *Records) setTypeFromFields() (bool, error) { + if r.msgSet == nil && r.recordBatch == nil { + return true, nil + } + if r.msgSet != nil && r.recordBatch != nil { + return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown") + } + r.recordsType = defaultRecords + if r.msgSet != nil { + r.recordsType = legacyRecords + } + return false, nil +} + +func (r *Records) encode(pe packetEncoder) error { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return err + } + } + + switch r.recordsType { + case legacyRecords: + if r.msgSet == nil { + return nil + } + return r.msgSet.encode(pe) + case defaultRecords: + if r.recordBatch == nil { + return nil + } + return r.recordBatch.encode(pe) + } + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) setTypeFromMagic(pd packetDecoder) error { + dec, err := pd.peek(magicOffset, magicLength) + if err != nil { + return err + } + + magic, err := dec.getInt8() + if err != nil { + return err + } + + r.recordsType = defaultRecords + if magic < 2 { + r.recordsType = legacyRecords + } + return nil +} + +func (r *Records) decode(pd packetDecoder) error { + if r.recordsType == unknownRecords { + if err := r.setTypeFromMagic(pd); err != nil { + return nil + } + } + + switch r.recordsType { + case legacyRecords: + r.msgSet = &MessageSet{} + return r.msgSet.decode(pd) + case defaultRecords: + r.recordBatch = &RecordBatch{} + return r.recordBatch.decode(pd) + } + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) numRecords() (int, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return 0, err + } + } + + switch r.recordsType { + case legacyRecords: + if r.msgSet == nil { + return 0, nil + } + return len(r.msgSet.Messages), nil + case defaultRecords: + if r.recordBatch == nil { + return 0, nil + } + return len(r.recordBatch.Records), nil + } + return 0, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isPartial() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.msgSet == nil { + return false, nil + } + return r.msgSet.PartialTrailingMessage, nil + case defaultRecords: + if r.recordBatch == nil { + return false, nil + } + return r.recordBatch.PartialTrailingRecord, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isControl() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case legacyRecords: + return false, nil + case defaultRecords: + if r.recordBatch == nil { + return false, nil + } + return r.recordBatch.Control, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index b9f654b..282932e 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -6,17 +6,18 @@ import ( "io" ) -type requestBody interface { +type protocolBody 
interface { encoder - decoder + versionedDecoder key() int16 version() int16 + requiredVersion() KafkaVersion } type request struct { correlationID int32 clientID string - body requestBody + body protocolBody } func (r *request) encode(pe packetEncoder) (err error) { @@ -53,40 +54,42 @@ func (r *request) decode(pd packetDecoder) (err error) { if r.body == nil { return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} } - return r.body.decode(pd) + return r.body.decode(pd, version) } -func decodeRequest(r io.Reader) (req *request, err error) { +func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { lengthBytes := make([]byte, 4) if _, err := io.ReadFull(r, lengthBytes); err != nil { - return nil, err + return nil, bytesRead, err } + bytesRead += len(lengthBytes) length := int32(binary.BigEndian.Uint32(lengthBytes)) if length <= 4 || length > MaxRequestSize { - return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} } encodedReq := make([]byte, length) if _, err := io.ReadFull(r, encodedReq); err != nil { - return nil, err + return nil, bytesRead, err } + bytesRead += len(encodedReq) req = &request{} if err := decode(encodedReq, req); err != nil { - return nil, err + return nil, bytesRead, err } - return req, nil + return req, bytesRead, nil } -func allocateBody(key, version int16) requestBody { +func allocateBody(key, version int16) protocolBody { switch key { case 0: return &ProduceRequest{} case 1: return &FetchRequest{} case 2: - return &OffsetRequest{} + return &OffsetRequest{Version: version} case 3: return &MetadataRequest{} case 8: @@ -107,6 +110,16 @@ func allocateBody(key, version int16) requestBody { return &DescribeGroupsRequest{} case 16: return &ListGroupsRequest{} + case 17: + return &SaslHandshakeRequest{} + case 18: + return &ApiVersionsRequest{} + case 19: + return &CreateTopicsRequest{} + case 20: + return &DeleteTopicsRequest{} + case 37: + return &CreatePartitionsRequest{} } return nil } diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go index ba96f0c..7d5dc60 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -1,19 +1,66 @@ /* -Package sarama provides client libraries for the Kafka protocol (versions 0.8 and later). The AsyncProducer object -is the high-level API for producing messages asynchronously; the SyncProducer provides a blocking API for the same purpose. -The Consumer object is the high-level API for consuming messages. The Client object provides metadata -management functionality that is shared between the higher-level objects. +Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level +API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level +API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. -Note that Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. -For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library -builds on Sarama to add this support. 
For Kafka-based tracking (Kafka 0.9 and later), the -https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. +To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel +and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. +The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be +useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees +depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the +SyncProducer can still sometimes be lost. + +To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic +consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the +https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 +and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. For lower-level needs, the Broker and Request/Response objects permit precise control over each connection -and message sent on the wire. +and message sent on the wire; the Client provides higher-level metadata management that is shared between +the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up +exactly with the protocol fields documented by Kafka at +https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + +Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry. 
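A short sketch of reading one of the metrics listed below back out of that local registry, assuming the client was built from a sarama.Config (whose MetricRegistry field holds the registry) and using the go-metrics Meter API:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
	"github.com/rcrowley/go-metrics"
)

func reportRequestRate(cfg *sarama.Config) {
	// "request-rate" is one of the broker-level meters listed in the table below.
	if m := cfg.MetricRegistry.Get("request-rate"); m != nil {
		if meter, ok := m.(metrics.Meter); ok {
			fmt.Printf("requests/sec (1-minute rate): %.2f\n", meter.Rate1())
		}
	}
}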
+ +Broker related metrics: + + +----------------------------------------------+------------+---------------------------------------------------------------+ + | Name | Type | Description | + +----------------------------------------------+------------+---------------------------------------------------------------+ + | incoming-byte-rate | meter | Bytes/second read off all brokers | + | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | + | outgoing-byte-rate | meter | Bytes/second written off all brokers | + | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | + | request-rate | meter | Requests/second sent to all brokers | + | request-rate-for-broker- | meter | Requests/second sent to a given broker | + | request-size | histogram | Distribution of the request size in bytes for all brokers | + | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | + | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | + | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | + | response-rate | meter | Responses/second received from all brokers | + | response-rate-for-broker- | meter | Responses/second received from a given broker | + | response-size | histogram | Distribution of the response size in bytes for all brokers | + | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + +----------------------------------------------+------------+---------------------------------------------------------------+ + +Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. 
+ +Producer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics | + | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic | + | record-send-rate | meter | Records/second sent to all topics | + | record-send-rate-for-topic- | meter | Records/second sent to a given topic | + | records-per-request | histogram | Distribution of the number of records sent per request for all topics | + | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic | + | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics | + | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ -The Request/Response objects and properties are mostly undocumented, as they line up exactly with the -protocol fields documented by Kafka at https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol */ package sarama diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go new file mode 100644 index 0000000..fbbc894 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go @@ -0,0 +1,33 @@ +package sarama + +type SaslHandshakeRequest struct { + Mechanism string +} + +func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.Mechanism); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { + if r.Mechanism, err = pd.getString(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) key() int16 { + return 17 +} + +func (r *SaslHandshakeRequest) version() int16 { + return 0 +} + +func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go new file mode 100644 index 0000000..ef290d4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go @@ -0,0 +1,38 @@ +package sarama + +type SaslHandshakeResponse struct { + Err KError + EnabledMechanisms []string +} + +func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putStringArray(r.EnabledMechanisms) +} + +func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeResponse) key() int16 { + return 17 +} + +func (r *SaslHandshakeResponse) version() int16 { + return 0 +} + +func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git 
a/vendor/github.com/Shopify/sarama/snappy.go b/vendor/github.com/Shopify/sarama/snappy.go deleted file mode 100644 index e86cb70..0000000 --- a/vendor/github.com/Shopify/sarama/snappy.go +++ /dev/null @@ -1,41 +0,0 @@ -package sarama - -import ( - "bytes" - "encoding/binary" - - "github.com/golang/snappy" -) - -var snappyMagic = []byte{130, 83, 78, 65, 80, 80, 89, 0} - -// SnappyEncode encodes binary data -func snappyEncode(src []byte) []byte { - return snappy.Encode(nil, src) -} - -// SnappyDecode decodes snappy data -func snappyDecode(src []byte) ([]byte, error) { - if bytes.Equal(src[:8], snappyMagic) { - var ( - pos = uint32(16) - max = uint32(len(src)) - dst = make([]byte, 0, len(src)) - chunk []byte - err error - ) - for pos < max { - size := binary.BigEndian.Uint32(src[pos : pos+4]) - pos += 4 - - chunk, err = snappy.Decode(chunk, src[pos:pos+size]) - if err != nil { - return nil, err - } - pos += size - dst = append(dst, chunk...) - } - return dst, nil - } - return snappy.Decode(nil, src) -} diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go index 031cf0f..fe20708 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -33,7 +33,7 @@ func (r *SyncGroupRequest) encode(pe packetEncoder) error { return nil } -func (r *SyncGroupRequest) decode(pd packetDecoder) (err error) { +func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { if r.GroupId, err = pd.getString(); err != nil { return } @@ -77,6 +77,10 @@ func (r *SyncGroupRequest) version() int16 { return 0 } +func (r *SyncGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { if r.GroupAssignments == nil { r.GroupAssignments = make(map[string][]byte) @@ -86,7 +90,7 @@ func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment } func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { - bin, err := encode(memberAssignment) + bin, err := encode(memberAssignment, nil) if err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go index 49c8692..194b382 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -16,13 +16,26 @@ func (r *SyncGroupResponse) encode(pe packetEncoder) error { return pe.putBytes(r.MemberAssignment) } -func (r *SyncGroupResponse) decode(pd packetDecoder) (err error) { - if kerr, err := pd.getInt16(); err != nil { +func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) + r.MemberAssignment, err = pd.getBytes() return } + +func (r *SyncGroupResponse) key() int16 { + return 14 +} + +func (r *SyncGroupResponse) version() int16 { + return 0 +} + +func (r *SyncGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go index 69a26d1..dd096b6 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -2,9 +2,16 @@ package sarama import "sync" -// SyncProducer 
publishes Kafka messages. It routes messages to the correct broker, refreshing metadata as appropriate, -// and parses responses for errors. You must call Close() on a producer to avoid leaks, it may not be garbage-collected automatically when -// it passes out of scope. +// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct +// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer +// to avoid leaks, it may not be garbage-collected automatically when it passes out of scope. +// +// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual +// durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`. +// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. +// +// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to +// be set to true in its configuration. type SyncProducer interface { // SendMessage produces a given message, and returns only when it either has @@ -12,10 +19,16 @@ type SyncProducer interface { // of the produced message, or an error if the message failed to produce. SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) - // Close shuts down the producer and flushes any messages it may have buffered. - // You must call this function before a producer object passes out of scope, as - // it may otherwise leak memory. You must call this before calling Close on the - // underlying client. + // SendMessages produces a given set of messages, and returns only when all + // messages in the set have either succeeded or failed. Note that messages + // can succeed and fail individually; if some succeed and some fail, + // SendMessages will return an error. + SendMessages(msgs []*ProducerMessage) error + + // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. Close() error } @@ -26,6 +39,15 @@ type syncProducer struct { // NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { + if config == nil { + config = NewConfig() + config.Producer.Return.Successes = true + } + + if err := verifyProducerConfig(config); err != nil { + return nil, err + } + p, err := NewAsyncProducer(addrs, config) if err != nil { return nil, err @@ -36,6 +58,10 @@ func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { // NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this producer. 
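The SyncProducer changes above add batch sending via SendMessages and make the Return.Successes/Return.Errors requirement explicit. A hedged usage sketch; the broker address and topic are placeholders, and the error handling relies on SendMessages returning the ProducerErrors value used in the implementation below:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// Both flags must be true or verifyProducerConfig rejects the config.
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("one"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("partition=%d offset=%d", partition, offset)

	// SendMessages reports per-message failures as a sarama.ProducerErrors.
	batch := []*sarama.ProducerMessage{
		{Topic: "example-topic", Value: sarama.StringEncoder("two")},
		{Topic: "example-topic", Value: sarama.StringEncoder("three")},
	}
	if err := producer.SendMessages(batch); err != nil {
		if perrs, ok := err.(sarama.ProducerErrors); ok {
			for _, pe := range perrs {
				log.Printf("failed message: %v", pe.Err)
			}
		} else {
			log.Print(err)
		}
	}
}
```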
func NewSyncProducerFromClient(client Client) (SyncProducer, error) { + if err := verifyProducerConfig(client.Config()); err != nil { + return nil, err + } + p, err := NewAsyncProducerFromClient(client) if err != nil { return nil, err @@ -44,8 +70,6 @@ func NewSyncProducerFromClient(client Client) (SyncProducer, error) { } func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { - p.conf.Producer.Return.Successes = true - p.conf.Producer.Return.Errors = true sp := &syncProducer{producer: p} sp.wg.Add(2) @@ -55,27 +79,72 @@ func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { return sp } +func verifyProducerConfig(config *Config) error { + if !config.Producer.Return.Errors { + return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer") + } + if !config.Producer.Return.Successes { + return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer") + } + return nil +} + func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { oldMetadata := msg.Metadata defer func() { msg.Metadata = oldMetadata }() - expectation := make(chan error, 1) + expectation := make(chan *ProducerError, 1) msg.Metadata = expectation sp.producer.Input() <- msg if err := <-expectation; err != nil { - return -1, -1, err + return -1, -1, err.Err } return msg.Partition, msg.Offset, nil } +func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { + savedMetadata := make([]interface{}, len(msgs)) + for i := range msgs { + savedMetadata[i] = msgs[i].Metadata + } + defer func() { + for i := range msgs { + msgs[i].Metadata = savedMetadata[i] + } + }() + + expectations := make(chan chan *ProducerError, len(msgs)) + go func() { + for _, msg := range msgs { + expectation := make(chan *ProducerError, 1) + msg.Metadata = expectation + sp.producer.Input() <- msg + expectations <- expectation + } + close(expectations) + }() + + var errors ProducerErrors + for expectation := range expectations { + if err := <-expectation; err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + func (sp *syncProducer) handleSuccesses() { defer sp.wg.Done() for msg := range sp.producer.Successes() { - expectation := msg.Metadata.(chan error) + expectation := msg.Metadata.(chan *ProducerError) expectation <- nil } } @@ -83,8 +152,8 @@ func (sp *syncProducer) handleSuccesses() { func (sp *syncProducer) handleErrors() { defer sp.wg.Done() for err := range sp.producer.Errors() { - expectation := err.Msg.Metadata.(chan error) - expectation <- err.Err + expectation := err.Msg.Metadata.(chan *ProducerError) + expectation <- err } } diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go new file mode 100644 index 0000000..372278d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/timestamp.go @@ -0,0 +1,40 @@ +package sarama + +import ( + "fmt" + "time" +) + +type Timestamp struct { + *time.Time +} + +func (t Timestamp) encode(pe packetEncoder) error { + timestamp := int64(-1) + + if !t.Before(time.Unix(0, 0)) { + timestamp = t.UnixNano() / int64(time.Millisecond) + } else if !t.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)} + } + + pe.putInt64(timestamp) + return nil +} + +func (t Timestamp) decode(pd packetDecoder) error { + millis, err := pd.getInt64() + if err != nil { + return err + } + + // negative timestamps are invalid, in these cases we should return + 
// a zero time + timestamp := time.Time{} + if millis >= 0 { + timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + + *t.Time = timestamp + return nil +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 04ca887..9d7b60f 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -2,8 +2,9 @@ package sarama import ( "bufio" + "fmt" "net" - "sort" + "regexp" ) type none struct{} @@ -23,13 +24,11 @@ func (slice int32Slice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } -func dupeAndSort(input []int32) []int32 { +func dupInt32Slice(input []int32) []int32 { ret := make([]int32, 0, len(input)) for _, val := range input { ret = append(ret, val) } - - sort.Sort(int32Slice(ret)) return ret } @@ -109,3 +108,77 @@ func newBufConn(conn net.Conn) *bufConn { func (bc *bufConn) Read(b []byte) (n int, err error) { return bc.buf.Read(b) } + +// KafkaVersion instances represent versions of the upstream Kafka broker. +type KafkaVersion struct { + // it's a struct rather than just typing the array directly to make it opaque and stop people + // generating their own arbitrary versions + version [4]uint +} + +func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion { + return KafkaVersion{ + version: [4]uint{major, minor, veryMinor, patch}, + } +} + +// IsAtLeast return true if and only if the version it is called on is +// greater than or equal to the version passed in: +// V1.IsAtLeast(V2) // false +// V2.IsAtLeast(V1) // true +func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { + for i := range v.version { + if v.version[i] > other.version[i] { + return true + } else if v.version[i] < other.version[i] { + return false + } + } + return true +} + +// Effective constants defining the supported kafka versions. 
+var ( + V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) + V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) + V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) + V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) + V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) + V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) + V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) + V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) + V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) + V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) + V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) + minVersion = V0_8_2_0 +) + +func ParseKafkaVersion(s string) (KafkaVersion, error) { + var major, minor, veryMinor, patch uint + var err error + if s[0] == '0' { + err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch}) + } else { + err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) + } + if err != nil { + return minVersion, err + } + return newKafkaVersion(major, minor, veryMinor, patch), nil +} + +func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error { + if !regexp.MustCompile(pattern).MatchString(s) { + return fmt.Errorf("invalid version `%s`", s) + } + _, err := fmt.Sscanf(s, format, v[0], v[1], v[2]) + return err +} + +func (v KafkaVersion) String() string { + if v.version[0] == 0 { + return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3]) + } else { + return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2]) + } +} diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml new file mode 100644 index 0000000..d6cf4f1 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: +- 1.5.4 +- 1.6.1 + +sudo: false diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE new file mode 100644 index 0000000..5bf3688 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md new file mode 100644 index 0000000..3f2695c --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/README.md @@ -0,0 +1,13 @@ +# go-xerial-snappy + +[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) + +Xerial-compatible Snappy framing support for golang. + +Packages using Xerial for snappy encoding use a framing format incompatible with +basically everything else in existence. This package wraps Go's built-in snappy +package to support it. + +Apps that use this format include Apache Kafka (see +https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for +details). diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go new file mode 100644 index 0000000..b8f8b51 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go @@ -0,0 +1,43 @@ +package snappy + +import ( + "bytes" + "encoding/binary" + + master "github.com/golang/snappy" +) + +var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} + +// Encode encodes data as snappy with no framing header. +func Encode(src []byte) []byte { + return master.Encode(nil, src) +} + +// Decode decodes snappy data whether it is traditional unframed +// or includes the xerial framing format. +func Decode(src []byte) ([]byte, error) { + if !bytes.Equal(src[:8], xerialHeader) { + return master.Decode(nil, src) + } + + var ( + pos = uint32(16) + max = uint32(len(src)) + dst = make([]byte, 0, len(src)) + chunk []byte + err error + ) + for pos < max { + size := binary.BigEndian.Uint32(src[pos : pos+4]) + pos += 4 + + chunk, err = master.Decode(chunk, src[pos:pos+size]) + if err != nil { + return nil, err + } + pos += size + dst = append(dst, chunk...) 
+ } + return dst, nil +} diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore new file mode 100644 index 0000000..c2bb6e4 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.gitignore @@ -0,0 +1,31 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml new file mode 100644 index 0000000..78be21c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.x + +script: + - go test -v -cpu=2 + - go test -v -cpu=2 -race \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 0000000..bd899d8 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
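go-xerial-snappy, vendored in full above, wraps golang/snappy so that Decode also understands the xerial framing Kafka clients emit. A round-trip sketch against the two exported functions shown in snappy.go:

```go
package main

import (
	"bytes"
	"log"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	original := bytes.Repeat([]byte("payload worth compressing "), 64)

	// Encode always emits plain, unframed snappy data.
	encoded := snappy.Encode(original)

	// Decode handles both plain snappy and the xerial-framed variant.
	decoded, err := snappy.Decode(encoded)
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(decoded, original) {
		log.Fatal("round trip mismatch")
	}
	log.Printf("%d bytes compressed to %d", len(original), len(encoded))
}
```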
+ diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md new file mode 100644 index 0000000..dd3c9d4 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/README.md @@ -0,0 +1,31 @@ +[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4) +[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) + +# lz4 +LZ4 compression and decompression in pure Go + +## Usage + +```go +import "github.com/pierrec/lz4" +``` + +## Description + +Package lz4 implements reading and writing lz4 compressed data (a frame), +as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +using an io.Reader (decompression) and io.Writer (compression). +It is designed to minimize memory usage while maximizing throughput by being able to +[de]compress data concurrently. + +The Reader and the Writer support concurrent processing provided the supplied buffers are +large enough (in multiples of BlockMaxSize) and there is no block dependency. +Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. +The runtime.GOMAXPROCS() value is used to apply concurrency or not. + +Although the block level compression and decompression functions are exposed and are fully compatible +with the lz4 block format definition, they are low level and should not be used directly. +For a complete description of an lz4 compressed block, see: +http://fastcompression.blogspot.fr/2011/05/lz4-explained.html + +See https://github.com/Cyan4973/lz4 for the reference C implementation. diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go new file mode 100644 index 0000000..44e3eaa --- /dev/null +++ b/vendor/github.com/pierrec/lz4/block.go @@ -0,0 +1,454 @@ +package lz4 + +import ( + "encoding/binary" + "errors" +) + +// block represents a frame data block. +// Used when compressing or decompressing frame blocks concurrently. +type block struct { + compressed bool + zdata []byte // compressed data + data []byte // decompressed data + offset int // offset within the data as with block dependency the 64Kb window is prepended to it + checksum uint32 // compressed data checksum + err error // error while [de]compressing +} + +var ( + // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted. + ErrInvalidSource = errors.New("lz4: invalid source") + // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when + // the supplied buffer for [de]compression is too small. + ErrShortBuffer = errors.New("lz4: short buffer") +) + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock decompresses the source buffer into the destination one, +// starting at the di index and returning the decompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. 
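The README above positions the frame-level Writer and Reader as the supported entry points and the block functions as low-level plumbing. A hedged in-memory round trip against the vendored API (NewWriter, NewReader and the embedded Header appear in writer.go, reader.go and lz4.go below):

```go
package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	original := bytes.Repeat([]byte("lz4 frame round trip "), 4096)

	// Compress into an in-memory frame; Header options may be changed
	// freely before the first Write.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	zw.Header.BlockChecksum = true
	if _, err := zw.Write(original); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil { // writes the end mark and frame checksum
		log.Fatal(err)
	}

	// Decompress it back through the frame Reader.
	zr := lz4.NewReader(&frame)
	decompressed, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(decompressed, original) {
		log.Fatal("round trip mismatch")
	}
	log.Printf("%d input bytes became a %d byte frame", len(original), frame.Len())
}
```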
+func UncompressBlock(src, dst []byte, di int) (int, error) { + si, sn, di0 := 0, len(src), di + if sn == 0 { + return 0, nil + } + + for { + // literals and match lengths (token) + lLen := int(src[si] >> 4) + mLen := int(src[si] & 0xF) + if si++; si == sn { + return di, ErrInvalidSource + } + + // literals + if lLen > 0 { + if lLen == 0xF { + for src[si] == 0xFF { + lLen += 0xFF + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + lLen += int(src[si]) + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + if len(dst)-di < lLen || si+lLen > sn { + return di - di0, ErrShortBuffer + } + di += copy(dst[di:], src[si:si+lLen]) + + if si += lLen; si >= sn { + return di - di0, nil + } + } + + if si += 2; si >= sn { + return di, ErrInvalidSource + } + offset := int(src[si-2]) | int(src[si-1])<<8 + if di-offset < 0 || offset == 0 { + return di - di0, ErrInvalidSource + } + + // match + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + mLen += int(src[si]) + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + // minimum match length is 4 + mLen += 4 + if len(dst)-di <= mLen { + return di - di0, ErrShortBuffer + } + + // copy the match (NB. match is at least 4 bytes long) + if mLen >= offset { + bytesToCopy := offset * (mLen / offset) + // Efficiently copy the match dst[di-offset:di] into the slice + // dst[di:di+bytesToCopy] + expanded := dst[di-offset : di+bytesToCopy] + n := offset + for n <= bytesToCopy+offset { + copy(expanded[n:], expanded[:n]) + n *= 2 + } + di += bytesToCopy + mLen -= bytesToCopy + } + + di += copy(dst[di:], dst[di-offset:di-offset+mLen]) + } +} + +// CompressBlock compresses the source buffer starting at soffet into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, soffset int) (int, error) { + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 || soffset >= sn { + return 0, nil + } + var si, di int + + // fast scan strategy: + // we only need a hash table to store the last sequences (4 bytes) + var hashTable [1 << hashLog]int + var hashShift = uint((minMatch * 8) - hashLog) + + // Initialise the hash table with the first 64Kb of the input buffer + // (used when compressing dependent blocks) + for si < soffset { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + si++ + hashTable[h] = si + } + + anchor := si + fma := 1 << skipStrength + for si < sn-minMatch { + // hash the next 4 bytes (sequence)... 
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + // -1 to separate existing entries from new ones + ref := hashTable[h] - 1 + // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving) + hashTable[h] = si + 1 + // no need to check the last 3 bytes in the first literal 4 bytes as + // this guarantees that the next match, if any, is compressed with + // a lower size, since to have some compression we must have: + // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size) + // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap + // and by definition we do have: + // ll >= 1, ml >= 4 + // => ll+ml >= 5 + // => so overlap must be 0 + + // the sequence is new, out of bound (64kb) or not valid: try next sequence + if ref < 0 || fma&(1<>winSizeLog > 0 || + src[ref] != src[si] || + src[ref+1] != src[si+1] || + src[ref+2] != src[si+2] || + src[ref+3] != src[si+3] { + // variable step: improves performance on non-compressible data + si += fma >> skipStrength + fma++ + continue + } + // match found + fma = 1 << skipStrength + lLen := si - anchor + offset := si - ref + + // encode match length part 1 + si += minMatch + mLen := si // match length has minMatch already + for si <= sn && src[si] == src[si-offset] { + si++ + } + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // encode literals length + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(l) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // literals + if di+lLen >= dn { + return di, ErrShortBuffer + } + di += copy(dst[di:], src[anchor:anchor+lLen]) + anchor = si + + // encode offset + if di += 2; di >= dn { + return di, ErrShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // encode match length part 2 + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(mLen) + if di++; di == dn { + return di, ErrShortBuffer + } + } + } + + if anchor == 0 { + // incompressible + return 0, nil + } + + // last literals + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(lLen) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // write literals + src = src[anchor:] + switch n := di + len(src); { + case n > dn: + return di, ErrShortBuffer + case n >= sn: + // incompressible + return 0, nil + } + di += copy(dst[di:], src) + return di, nil +} + +// CompressBlockHC compresses the source buffer starting at soffet into the destination one. +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. +// +// An error is returned if the destination buffer is too small. 
+func CompressBlockHC(src, dst []byte, soffset int) (int, error) { + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 || soffset >= sn { + return 0, nil + } + var si, di int + + // Hash Chain strategy: + // we need a hash table and a chain table + // the chain table cannot contain more entries than the window size (64Kb entries) + var hashTable [1 << hashLog]int + var chainTable [winSize]int + var hashShift = uint((minMatch * 8) - hashLog) + + // Initialise the hash table with the first 64Kb of the input buffer + // (used when compressing dependent blocks) + for si < soffset { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + chainTable[si&winMask] = hashTable[h] + si++ + hashTable[h] = si + } + + anchor := si + for si < sn-minMatch { + // hash the next 4 bytes (sequence)... + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + + // follow the chain until out of window and give the longest match + mLen := 0 + offset := 0 + for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 { + // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length + if src[next+mLen] == src[si+mLen] { + for ml := 0; ; ml++ { + if src[next+ml] != src[si+ml] || si+ml > sn { + // found a longer match, keep its position and length + if mLen < ml && ml >= minMatch { + mLen = ml + offset = si - next + } + break + } + } + } + } + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + 1 + + // no match found + if mLen == 0 { + si++ + continue + } + + // match found + // update hash/chain tables with overlaping bytes: + // si already hashed, add everything from si+1 up to the match length + for si, ml := si+1, si+mLen; si < ml; { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + chainTable[si&winMask] = hashTable[h] + si++ + hashTable[h] = si + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // match length does not include minMatch + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // encode literals length + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(l) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // literals + if di+lLen >= dn { + return di, ErrShortBuffer + } + di += copy(dst[di:], src[anchor:anchor+lLen]) + anchor = si + + // encode offset + if di += 2; di >= dn { + return di, ErrShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // encode match length part 2 + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(mLen) + if di++; di == dn { + return di, ErrShortBuffer + } + } + } + + if anchor == 0 { + // incompressible + return 0, nil + } + + // last literals + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(lLen) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // write literals + src = src[anchor:] + switch n := di + len(src); { + case n > dn: + return di, ErrShortBuffer + case n >= sn: + // 
incompressible + return 0, nil + } + di += copy(dst[di:], src) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go new file mode 100644 index 0000000..ddb82f6 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4.go @@ -0,0 +1,105 @@ +// Package lz4 implements reading and writing lz4 compressed data (a frame), +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +// using an io.Reader (decompression) and io.Writer (compression). +// It is designed to minimize memory usage while maximizing throughput by being able to +// [de]compress data concurrently. +// +// The Reader and the Writer support concurrent processing provided the supplied buffers are +// large enough (in multiples of BlockMaxSize) and there is no block dependency. +// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. +// The runtime.GOMAXPROCS() value is used to apply concurrency or not. +// +// Although the block level compression and decompression functions are exposed and are fully compatible +// with the lz4 block format definition, they are low level and should not be used directly. +// For a complete description of an lz4 compressed block, see: +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +// +// See https://github.com/Cyan4973/lz4 for the reference C implementation. +package lz4 + +import ( + "hash" + "sync" + + "github.com/pierrec/xxHash/xxHash32" +) + +const ( + // Extension is the LZ4 frame file name extension + Extension = ".lz4" + // Version is the LZ4 frame format version + Version = 1 + + frameMagic = uint32(0x184D2204) + frameSkipMagic = uint32(0x184D2A50) + + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise. + hashLog = 16 + hashTableSize = 1 << hashLog + hashShift = uint((minMatch * 8) - hashLog) + + mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. + skipStrength = 6 // variable step for fast scan + + hasher = uint32(2654435761) // prime number used to hash minMatch +) + +// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. +var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20} +var bsMapValue = map[int]byte{} + +// Reversed. +func init() { + for i, v := range bsMapID { + bsMapValue[v] = i + } +} + +// Header describes the various flags that can be set on a Writer or obtained from a Reader. +// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// +// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. +// It is the caller responsibility to check them if necessary (typically when using the Reader concurrency). 
+type Header struct { + BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one) + BlockChecksum bool // compressed blocks are checksumed + NoChecksum bool // frame checksum + BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. + Size uint64 // the frame total size. It is _not_ computed by the Writer. + HighCompression bool // use high compression (only for the Writer) + done bool // whether the descriptor was processed (Read or Write and checked) + // Removed as not supported + // Dict bool // a dictionary id is to be used + // DictID uint32 // the dictionary id read from the frame, if any. +} + +// xxhPool wraps the standard pool for xxHash items. +// Putting items back in the pool automatically resets them. +type xxhPool struct { + sync.Pool +} + +func (p *xxhPool) Get() hash.Hash32 { + return p.Pool.Get().(hash.Hash32) +} + +func (p *xxhPool) Put(h hash.Hash32) { + h.Reset() + p.Pool.Put(h) +} + +// hashPool is used by readers and writers and contains xxHash items. +var hashPool = xxhPool{ + Pool: sync.Pool{ + New: func() interface{} { return xxHash32.New(0) }, + }, +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go new file mode 100644 index 0000000..9f7fd60 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -0,0 +1,364 @@ +package lz4 + +import ( + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "runtime" + "sync" + "sync/atomic" +) + +// ErrInvalid is returned when the data being read is not an LZ4 archive +// (LZ4 magic number detection failed). +var ErrInvalid = errors.New("invalid lz4 data") + +// errEndOfBlock is returned by readBlock when it has reached the last block of the frame. +// It is not an error. +var errEndOfBlock = errors.New("end of block") + +// Reader implements the LZ4 frame decoder. +// The Header is set after the first call to Read(). +// The Header may change between Read() calls in case of concatenated frames. +type Reader struct { + Pos int64 // position within the source + Header + src io.Reader + checksum hash.Hash32 // frame hash + wg sync.WaitGroup // decompressing go routine wait group + data []byte // buffered decompressed data + window []byte // 64Kb decompressed data window +} + +// NewReader returns a new LZ4 frame decoder. +// No access to the underlying io.Reader is performed. +func NewReader(src io.Reader) *Reader { + return &Reader{ + src: src, + checksum: hashPool.Get(), + } +} + +// readHeader checks the frame magic number and parses the frame descriptoz. +// Skippable frames are supported even as a first frame although the LZ4 +// specifications recommends skippable frames not to be used as first frames. 
+func (z *Reader) readHeader(first bool) error { + defer z.checksum.Reset() + + for { + var magic uint32 + if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil { + if !first && err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + z.Pos += 4 + if magic>>8 == frameSkipMagic>>8 { + var skipSize uint32 + if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil { + return err + } + z.Pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + z.Pos += m + if err != nil { + return err + } + continue + } + if magic != frameMagic { + return ErrInvalid + } + break + } + + // header + var buf [8]byte + if _, err := io.ReadFull(z.src, buf[:2]); err != nil { + return err + } + z.Pos += 2 + + b := buf[0] + if b>>6 != Version { + return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version) + } + z.BlockDependency = b>>5&1 == 0 + z.BlockChecksum = b>>4&1 > 0 + frameSize := b>>3&1 > 0 + z.NoChecksum = b>>2&1 == 0 + // z.Dict = b&1 > 0 + + bmsID := buf[1] >> 4 & 0x7 + bSize, ok := bsMapID[bmsID] + if !ok { + return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID) + } + z.BlockMaxSize = bSize + + z.checksum.Write(buf[0:2]) + + if frameSize { + if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil { + return err + } + z.Pos += 8 + binary.LittleEndian.PutUint64(buf[:], z.Size) + z.checksum.Write(buf[0:8]) + } + + // if z.Dict { + // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil { + // return err + // } + // z.Pos += 4 + // binary.LittleEndian.PutUint32(buf[:], z.DictID) + // z.checksum.Write(buf[0:4]) + // } + + // header checksum + if _, err := io.ReadFull(z.src, buf[:1]); err != nil { + return err + } + z.Pos++ + if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { + return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h) + } + + z.Header.done = true + + return nil +} + +// Read decompresses data from the underlying source into the supplied buffer. +// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. +// +// Data is buffered if the input buffer is too small, and exhausted upon successive calls. +// +// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is +// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value. +func (z *Reader) Read(buf []byte) (n int, err error) { + if !z.Header.done { + if err = z.readHeader(true); err != nil { + return + } + } + + if len(buf) == 0 { + return + } + + // exhaust remaining data from previous Read() + if len(z.data) > 0 { + n = copy(buf, z.data) + z.data = z.data[n:] + if len(z.data) == 0 { + z.data = nil + } + return + } + + // Break up the input buffer into BlockMaxSize blocks with at least one block. + // Then decompress into each of them concurrently if possible (no dependency). + // In case of dependency, the first block will be missing the window (except on the + // very first call), the rest will have it already since it comes from the previous block. 
+ wbuf := buf + zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize + zblocks := make([]block, zn) + for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ { + zb := &zblocks[zi] + // last block may be too small + if len(wbuf) < z.BlockMaxSize+len(z.window) { + wbuf = make([]byte, z.BlockMaxSize+len(z.window)) + } + copy(wbuf, z.window) + if zb.err = z.readBlock(wbuf, zb); zb.err != nil { + break + } + wbuf = wbuf[z.BlockMaxSize:] + if !z.BlockDependency { + z.wg.Add(1) + go z.decompressBlock(zb, &abort) + continue + } + // cannot decompress concurrently when dealing with block dependency + z.decompressBlock(zb, nil) + // the last block may not contain enough data + if len(z.window) == 0 { + z.window = make([]byte, winSize) + } + if len(zb.data) >= winSize { + copy(z.window, zb.data[len(zb.data)-winSize:]) + } else { + copy(z.window, z.window[len(zb.data):]) + copy(z.window[len(zb.data)+1:], zb.data) + } + } + z.wg.Wait() + + // since a block size may be less then BlockMaxSize, trim the decompressed buffers + for _, zb := range zblocks { + if zb.err != nil { + if zb.err == errEndOfBlock { + return n, z.close() + } + return n, zb.err + } + bLen := len(zb.data) + if !z.NoChecksum { + z.checksum.Write(zb.data) + } + m := copy(buf[n:], zb.data) + // buffer the remaining data (this is necessarily the last block) + if m < bLen { + z.data = zb.data[m:] + } + n += m + } + + return +} + +// readBlock reads an entire frame block from the frame. +// The input buffer is the one that will receive the decompressed data. +// If the end of the frame is detected, it returns the errEndOfBlock error. +func (z *Reader) readBlock(buf []byte, b *block) error { + var bLen uint32 + if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil { + return err + } + atomic.AddInt64(&z.Pos, 4) + + switch { + case bLen == 0: + return errEndOfBlock + case bLen&(1<<31) == 0: + b.compressed = true + b.data = buf + b.zdata = make([]byte, bLen) + default: + bLen = bLen & (1<<31 - 1) + if int(bLen) > len(buf) { + return fmt.Errorf("lz4.Read: invalid block size: %d", bLen) + } + b.data = buf[:bLen] + b.zdata = buf[:bLen] + } + if _, err := io.ReadFull(z.src, b.zdata); err != nil { + return err + } + + if z.BlockChecksum { + if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil { + return err + } + xxh := hashPool.Get() + defer hashPool.Put(xxh) + xxh.Write(b.zdata) + if h := xxh.Sum32(); h != b.checksum { + return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum) + } + } + + return nil +} + +// decompressBlock decompresses a frame block. +// In case of an error, the block err is set with it and abort is set to 1. +func (z *Reader) decompressBlock(b *block, abort *uint32) { + if abort != nil { + defer z.wg.Done() + } + if b.compressed { + n := len(z.window) + m, err := UncompressBlock(b.zdata, b.data, n) + if err != nil { + if abort != nil { + atomic.StoreUint32(abort, 1) + } + b.err = err + return + } + b.data = b.data[n : n+m] + } + atomic.AddInt64(&z.Pos, int64(len(b.data))) +} + +// close validates the frame checksum (if any) and checks the next frame (if any). 
+func (z *Reader) close() error { + if !z.NoChecksum { + var checksum uint32 + if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil { + return err + } + if checksum != z.checksum.Sum32() { + return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum) + } + } + + // get ready for the next concatenated frame, but do not change the position + pos := z.Pos + z.Reset(z.src) + z.Pos = pos + + // since multiple frames can be concatenated, check for another one + return z.readHeader(false) +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) { + z.Header = Header{} + z.Pos = 0 + z.src = r + z.checksum.Reset() + z.data = nil + z.window = nil +} + +// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer. +// Returns the number of bytes written. +func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { + cpus := runtime.GOMAXPROCS(0) + var buf []byte + + // The initial buffer being nil, the first Read will be only read the compressed frame options. + // The buffer can then be sized appropriately to support maximum concurrency decompression. + // If multiple frames are concatenated, Read() will return with no data decompressed but with + // potentially changed options. The buffer will be resized accordingly, always trying to + // maximize concurrency. + for { + nsize := 0 + // the block max size can change if multiple streams are concatenated. + // Check it after every Read(). + if z.BlockDependency { + // in case of dependency, we cannot decompress concurrently, + // so allocate the minimum buffer + window size + nsize = len(z.window) + z.BlockMaxSize + } else { + // if no dependency, allocate a buffer large enough for concurrent decompression + nsize = cpus * z.BlockMaxSize + } + if nsize != len(buf) { + buf = make([]byte, nsize) + } + + m, er := z.Read(buf) + if er != nil && er != io.EOF { + return n, er + } + m, err = w.Write(buf[:m]) + n += int64(m) + if err != nil || er == io.EOF { + return + } + } +} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go new file mode 100644 index 0000000..b1b712f --- /dev/null +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -0,0 +1,377 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "hash" + "io" + "runtime" +) + +// Writer implements the LZ4 frame encoder. +type Writer struct { + Header + dst io.Writer + checksum hash.Hash32 // frame checksum + data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with + window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer + + zbCompressBuf []byte // buffer for compressing lz4 blocks + writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock +} + +// NewWriter returns a new LZ4 frame encoder. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. 
+func NewWriter(dst io.Writer) *Writer { + return &Writer{ + dst: dst, + checksum: hashPool.Get(), + Header: Header{ + BlockMaxSize: 4 << 20, + }, + writeSizeBuf: make([]byte, 4), + } +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. +func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = 4 << 20 + } + // the only option that need to be validated + bSize, ok := bsMapValue[z.Header.BlockMaxSize] + if !ok { + return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize) + } + + // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + // Size and DictID are optional + var buf [19]byte + + // set the fixed size data: magic number, block max size and flags + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + if !z.Header.BlockDependency { + flg |= 1 << 5 + } + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + // if z.Header.Dict { + // flg |= 1 + // } + buf[4] = flg + buf[5] = bSize << 4 + + // current buffer size: magic(4) + flags(1) + block max size (1) + n := 6 + // optional items + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + // if z.Header.Dict { + // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID) + // n += 4 + // } + + // header checksum includes the flags, block max size and optional Size and DictID + z.checksum.Write(buf[4:n]) + buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF) + z.checksum.Reset() + + // header ready, write it out + if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + + // initialize buffers dependent on header info + z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize) + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +// +// If the input buffer is large enough (typically in multiples of BlockMaxSize) +// the data will be compressed concurrently. +// +// Write never buffers any data unless in BlockDependency mode where it may +// do so until it has 64Kb of data, after which it never buffers any. +func (z *Writer) Write(buf []byte) (n int, err error) { + if !z.Header.done { + if err = z.writeHeader(); err != nil { + return + } + } + + if len(buf) == 0 { + return + } + + if !z.NoChecksum { + z.checksum.Write(buf) + } + + // with block dependency, require at least 64Kb of data to work with + // not having 64Kb only matters initially to setup the first window + bl := 0 + if z.BlockDependency && len(z.window) == 0 { + bl = len(z.data) + z.data = append(z.data, buf...) + if len(z.data) < winSize { + return len(buf), nil + } + buf = z.data + z.data = nil + } + + // Break up the input buffer into BlockMaxSize blocks, provisioning the left over block. + // Then compress into each of them concurrently if possible (no dependency). + var ( + zb block + wbuf = buf + zn = len(wbuf) / z.BlockMaxSize + zi = 0 + leftover = len(buf) % z.BlockMaxSize + ) + +loop: + for zi < zn { + if z.BlockDependency { + if zi == 0 { + // first block does not have the window + zb.data = append(z.window, wbuf[:z.BlockMaxSize]...) 
+ zb.offset = len(z.window) + wbuf = wbuf[z.BlockMaxSize-winSize:] + } else { + // set the uncompressed data including the window from previous block + zb.data = wbuf[:z.BlockMaxSize+winSize] + zb.offset = winSize + wbuf = wbuf[z.BlockMaxSize:] + } + } else { + zb.data = wbuf[:z.BlockMaxSize] + wbuf = wbuf[z.BlockMaxSize:] + } + + goto write + } + + // left over + if leftover > 0 { + zb = block{data: wbuf} + if z.BlockDependency { + if zn == 0 { + zb.data = append(z.window, zb.data...) + zb.offset = len(z.window) + } else { + zb.offset = winSize + } + } + + leftover = 0 + goto write + } + + if z.BlockDependency { + if len(z.window) == 0 { + z.window = make([]byte, winSize) + } + // last buffer may be shorter than the window + if len(buf) >= winSize { + copy(z.window, buf[len(buf)-winSize:]) + } else { + copy(z.window, z.window[len(buf):]) + copy(z.window[len(buf)+1:], buf) + } + } + + return + +write: + zb = z.compressBlock(zb) + _, err = z.writeBlock(zb) + + written := len(zb.data) + if bl > 0 { + if written >= bl { + written -= bl + bl = 0 + } else { + bl -= written + written = 0 + } + } + + n += written + // remove the window in zb.data + if z.BlockDependency { + if zi == 0 { + n -= len(z.window) + } else { + n -= winSize + } + } + if err != nil { + return + } + zi++ + goto loop +} + +// compressBlock compresses a block. +func (z *Writer) compressBlock(zb block) block { + // compressed block size cannot exceed the input's + var ( + n int + err error + zbuf = z.zbCompressBuf + ) + if z.HighCompression { + n, err = CompressBlockHC(zb.data, zbuf, zb.offset) + } else { + n, err = CompressBlock(zb.data, zbuf, zb.offset) + } + + // compressible and compressed size smaller than decompressed: ok! + if err == nil && n > 0 && len(zb.zdata) < len(zb.data) { + zb.compressed = true + zb.zdata = zbuf[:n] + } else { + zb.compressed = false + zb.zdata = zb.data[zb.offset:] + } + + if z.BlockChecksum { + xxh := hashPool.Get() + xxh.Write(zb.zdata) + zb.checksum = xxh.Sum32() + hashPool.Put(xxh) + } + + return zb +} + +// writeBlock writes a frame block to the underlying io.Writer (size, data). +func (z *Writer) writeBlock(zb block) (int, error) { + bLen := uint32(len(zb.zdata)) + if !zb.compressed { + bLen |= 1 << 31 + } + + n := 0 + + binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen) + n, err := z.dst.Write(z.writeSizeBuf) + if err != nil { + return n, err + } + + m, err := z.dst.Write(zb.zdata) + n += m + if err != nil { + return n, err + } + + if z.BlockChecksum { + binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum) + m, err := z.dst.Write(z.writeSizeBuf) + n += m + + if err != nil { + return n, err + } + } + + return n, nil +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +// +// Flush is only required when in BlockDependency mode and the total of +// data written is less than 64Kb. +func (z *Writer) Flush() error { + if len(z.data) == 0 { + return nil + } + + zb := z.compressBlock(block{data: z.data}) + if _, err := z.writeBlock(zb); err != nil { + return err + } + return nil +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. 
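Reader.WriteTo (above) and Writer.ReadFrom (below) move data in GOMAXPROCS-sized chunks so that block compression and decompression can run concurrently. A hedged sketch of streaming a source through the codec without hand-rolling the buffering:

```go
package main

import (
	"bytes"
	"log"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	src := strings.NewReader(strings.Repeat("streaming compression example ", 8192))

	// Writer.ReadFrom pulls from src in GOMAXPROCS*BlockMaxSize chunks.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := zw.ReadFrom(src); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Reader.WriteTo does the symmetric job on the way back out.
	var out bytes.Buffer
	zr := lz4.NewReader(&frame)
	if _, err := zr.WriteTo(&out); err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes from a %d byte frame", out.Len(), frame.Len())
}
```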
+func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + + // buffered data for the block dependency window + if z.BlockDependency && len(z.data) > 0 { + zb := block{data: z.data} + if _, err := z.writeBlock(z.compressBlock(zb)); err != nil { + return err + } + } + + if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil { + return err + } + if !z.NoChecksum { + if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil { + return err + } + } + return nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *Writer) Reset(w io.Writer) { + z.Header = Header{} + z.dst = w + z.checksum.Reset() + z.data = nil + z.window = nil +} + +// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer. +// Returns the number of bytes read. +// It does not close the Writer. +func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) { + cpus := runtime.GOMAXPROCS(0) + buf := make([]byte, cpus*z.BlockMaxSize) + for { + m, er := io.ReadFull(r, buf) + n += int64(m) + if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF { + if _, err = z.Write(buf[:m]); err != nil { + return + } + if er == nil { + continue + } + return + } + return n, er + } +} diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE new file mode 100644 index 0000000..c1418f3 --- /dev/null +++ b/vendor/github.com/pierrec/xxHash/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2014, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go new file mode 100644 index 0000000..ff58256 --- /dev/null +++ b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go @@ -0,0 +1,212 @@ +// Package xxHash32 implements the very fast xxHash hashing algorithm (32 bits version). 
+// (https://github.com/Cyan4973/xxHash/) +package xxHash32 + +import "hash" + +const ( + prime32_1 = 2654435761 + prime32_2 = 2246822519 + prime32_3 = 3266489917 + prime32_4 = 668265263 + prime32_5 = 374761393 +) + +type xxHash struct { + seed uint32 + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// New returns a new Hash32 instance. +func New(seed uint32) hash.Hash32 { + xxh := &xxHash{seed: seed} + xxh.Reset() + return xxh +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh xxHash) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *xxHash) Reset() { + xxh.v1 = xxh.seed + prime32_1 + prime32_2 + xxh.v2 = xxh.seed + prime32_2 + xxh.v3 = xxh.seed + xxh.v4 = xxh.seed - prime32_1 + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *xxHash) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *xxHash) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. +func (xxh *xxHash) Write(input []byte) (int, error) { + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + xxh.v1 = rol13(xxh.v1+u32(xxh.buf[:])*prime32_2) * prime32_1 + xxh.v2 = rol13(xxh.v2+u32(xxh.buf[4:])*prime32_2) * prime32_1 + xxh.v3 = rol13(xxh.v3+u32(xxh.buf[8:])*prime32_2) * prime32_1 + xxh.v4 = rol13(xxh.v4+u32(xxh.buf[12:])*prime32_2) * prime32_1 + p = r + xxh.bufused = 0 + } + + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1 + v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1 + v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1 + v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *xxHash) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if xxh.totalLen >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += xxh.seed + prime32_5 + } + + p := 0 + n := xxh.bufused + for n := n - 4; p <= n; p += 4 { + h32 += u32(xxh.buf[p:p+4]) * prime32_3 + h32 = rol17(h32) * prime32_4 + } + for ; p < n; p++ { + h32 += uint32(xxh.buf[p]) * prime32_5 + h32 = rol11(h32) * prime32_1 + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} + +// Checksum returns the 32bits Hash value. 
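+// It is the one-shot equivalent of writing input to a hasher obtained from
+// New(seed) and then calling Sum32.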
+func Checksum(input []byte, seed uint32) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += seed + prime32_5 + } else { + v1 := seed + prime32_1 + prime32_2 + v2 := seed + prime32_2 + v3 := seed + v4 := seed - prime32_1 + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1 + v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1 + v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1 + v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += u32(input[p:p+4]) * prime32_3 + h32 = rol17(h32) * prime32_4 + } + for p < n { + h32 += uint32(input[p]) * prime32_5 + h32 = rol11(h32) * prime32_1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} + +func u32(buf []byte) uint32 { + // go compiler recognizes this pattern and optimizes it on little endian platforms + return uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore new file mode 100644 index 0000000..83c8f82 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/.gitignore @@ -0,0 +1,9 @@ +*.[68] +*.a +*.out +*.swp +_obj +_testmain.go +cmd/metrics-bench/metrics-bench +cmd/metrics-example/metrics-example +cmd/never-read/never-read diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml new file mode 100644 index 0000000..f8b3b2e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + +script: + - ./validate.sh + +# this should give us faster builds according to +# http://docs.travis-ci.com/user/migrating-from-legacy/ +sudo: false diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE new file mode 100644 index 0000000..363fa9e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/LICENSE @@ -0,0 +1,29 @@ +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md new file mode 100644 index 0000000..bc2a45a --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/README.md @@ -0,0 +1,166 @@ +go-metrics +========== + +![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master) + +Go port of Coda Hale's Metrics library: . + +Documentation: . + +Usage +----- + +Create and update metrics: + +```go +c := metrics.NewCounter() +metrics.Register("foo", c) +c.Inc(47) + +g := metrics.NewGauge() +metrics.Register("bar", g) +g.Update(47) + +r := NewRegistry() +g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() }) + +s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) +h := metrics.NewHistogram(s) +metrics.Register("baz", h) +h.Update(47) + +m := metrics.NewMeter() +metrics.Register("quux", m) +m.Mark(47) + +t := metrics.NewTimer() +metrics.Register("bang", t) +t.Time(func() {}) +t.Update(47) +``` + +Register() is not threadsafe. For threadsafe metric registration use +GetOrRegister: + +```go +t := metrics.GetOrRegisterTimer("account.create.latency", nil) +t.Time(func() {}) +t.Update(47) +``` + +**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will +leak memory: + +```go +// Will call Stop() on the Meter to allow for garbage collection +metrics.Unregister("quux") +// Or similarly for a Timer that embeds a Meter +metrics.Unregister("bang") +``` + +Periodically log every metric in human-readable form to standard error: + +```go +go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) +``` + +Periodically log every metric in slightly-more-parseable form to syslog: + +```go +w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") +go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) +``` + +Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite): + +```go + +import "github.com/cyberdelia/go-metrics-graphite" + +addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") +go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) +``` + +Periodically emit every metric into InfluxDB: + +**NOTE:** this has been pulled out of the library due to constant fluctuations +in the InfluxDB API. In fact, all client libraries are on their way out. see +issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and +[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details. 
+ +```go +import "github.com/vrischmann/go-metrics-influxdb" + +go influxdb.InfluxDB(metrics.DefaultRegistry, + 10e9, + "127.0.0.1:8086", + "database-name", + "username", + "password" +) +``` + +Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato): + +**Note**: the client included with this repository under the `librato` package +has been deprecated and moved to the repository linked above. + +```go +import "github.com/mihasya/go-metrics-librato" + +go librato.Librato(metrics.DefaultRegistry, + 10e9, // interval + "example@example.com", // account owner email address + "token", // Librato API token + "hostname", // source + []float64{0.95}, // percentiles to send + time.Millisecond, // time unit +) +``` + +Periodically emit every metric to StatHat: + +```go +import "github.com/rcrowley/go-metrics/stathat" + +go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") +``` + +Maintain all metrics along with expvars at `/debug/metrics`: + +This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/) +but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars +as well as all your go-metrics. + + +```go +import "github.com/rcrowley/go-metrics/exp" + +exp.Exp(metrics.DefaultRegistry) +``` + +Installation +------------ + +```sh +go get github.com/rcrowley/go-metrics +``` + +StatHat support additionally requires their Go client: + +```sh +go get github.com/stathat/go +``` + +Publishing Metrics +------------------ + +Clients are available for the following destinations: + +* Librato - https://github.com/mihasya/go-metrics-librato +* Graphite - https://github.com/cyberdelia/go-metrics-graphite +* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb +* Ganglia - https://github.com/appscode/metlia +* Prometheus - https://github.com/deathowl/go-metrics-prometheus +* DataDog - https://github.com/syntaqx/go-metrics-datadog +* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go new file mode 100644 index 0000000..bb7b039 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/counter.go @@ -0,0 +1,112 @@ +package metrics + +import "sync/atomic" + +// Counters hold an int64 value that can be incremented and decremented. +type Counter interface { + Clear() + Count() int64 + Dec(int64) + Inc(int64) + Snapshot() Counter +} + +// GetOrRegisterCounter returns an existing Counter or constructs and registers +// a new StandardCounter. +func GetOrRegisterCounter(name string, r Registry) Counter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewCounter).(Counter) +} + +// NewCounter constructs a new StandardCounter. +func NewCounter() Counter { + if UseNilMetrics { + return NilCounter{} + } + return &StandardCounter{0} +} + +// NewRegisteredCounter constructs and registers a new StandardCounter. +func NewRegisteredCounter(name string, r Registry) Counter { + c := NewCounter() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// CounterSnapshot is a read-only copy of another Counter. +type CounterSnapshot int64 + +// Clear panics. +func (CounterSnapshot) Clear() { + panic("Clear called on a CounterSnapshot") +} + +// Count returns the count at the time the snapshot was taken. +func (c CounterSnapshot) Count() int64 { return int64(c) } + +// Dec panics. 
+func (CounterSnapshot) Dec(int64) { + panic("Dec called on a CounterSnapshot") +} + +// Inc panics. +func (CounterSnapshot) Inc(int64) { + panic("Inc called on a CounterSnapshot") +} + +// Snapshot returns the snapshot. +func (c CounterSnapshot) Snapshot() Counter { return c } + +// NilCounter is a no-op Counter. +type NilCounter struct{} + +// Clear is a no-op. +func (NilCounter) Clear() {} + +// Count is a no-op. +func (NilCounter) Count() int64 { return 0 } + +// Dec is a no-op. +func (NilCounter) Dec(i int64) {} + +// Inc is a no-op. +func (NilCounter) Inc(i int64) {} + +// Snapshot is a no-op. +func (NilCounter) Snapshot() Counter { return NilCounter{} } + +// StandardCounter is the standard implementation of a Counter and uses the +// sync/atomic package to manage a single int64 value. +type StandardCounter struct { + count int64 +} + +// Clear sets the counter to zero. +func (c *StandardCounter) Clear() { + atomic.StoreInt64(&c.count, 0) +} + +// Count returns the current count. +func (c *StandardCounter) Count() int64 { + return atomic.LoadInt64(&c.count) +} + +// Dec decrements the counter by the given amount. +func (c *StandardCounter) Dec(i int64) { + atomic.AddInt64(&c.count, -i) +} + +// Inc increments the counter by the given amount. +func (c *StandardCounter) Inc(i int64) { + atomic.AddInt64(&c.count, i) +} + +// Snapshot returns a read-only copy of the counter. +func (c *StandardCounter) Snapshot() Counter { + return CounterSnapshot(c.Count()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go new file mode 100644 index 0000000..043ccef --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/debug.go @@ -0,0 +1,76 @@ +package metrics + +import ( + "runtime/debug" + "time" +) + +var ( + debugMetrics struct { + GCStats struct { + LastGC Gauge + NumGC Gauge + Pause Histogram + //PauseQuantiles Histogram + PauseTotal Gauge + } + ReadGCStats Timer + } + gcStats debug.GCStats +) + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called as a goroutine. +func CaptureDebugGCStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureDebugGCStatsOnce(r) + } +} + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called in a background goroutine. +// Giving a registry which has not been given to RegisterDebugGCStats will +// panic. +// +// Be careful (but much less so) with this because debug.ReadGCStats calls +// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world +// operation, isn't something you want to be doing all the time. +func CaptureDebugGCStatsOnce(r Registry) { + lastGC := gcStats.LastGC + t := time.Now() + debug.ReadGCStats(&gcStats) + debugMetrics.ReadGCStats.UpdateSince(t) + + debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) + debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) + if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { + debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) + } + //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) + debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) +} + +// Register metrics for the Go garbage collector statistics exported in +// debug.GCStats. The metrics are named by their fully-qualified Go symbols, +// i.e. debug.GCStats.PauseTotal. 
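+//
+// A typical pairing, sketched here with the default registry and a 5 second
+// sampling interval: register once, then capture in the background.
+//
+//	RegisterDebugGCStats(DefaultRegistry)
+//	go CaptureDebugGCStats(DefaultRegistry, 5*time.Second)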
+func RegisterDebugGCStats(r Registry) { + debugMetrics.GCStats.LastGC = NewGauge() + debugMetrics.GCStats.NumGC = NewGauge() + debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) + //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) + debugMetrics.GCStats.PauseTotal = NewGauge() + debugMetrics.ReadGCStats = NewTimer() + + r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) + r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) + r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) + //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) + r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) + r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) +} + +// Allocate an initial slice for gcStats.Pause to avoid allocations during +// normal operation. +func init() { + gcStats.Pause = make([]time.Duration, 11) +} diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go new file mode 100644 index 0000000..694a1d0 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/ewma.go @@ -0,0 +1,118 @@ +package metrics + +import ( + "math" + "sync" + "sync/atomic" +) + +// EWMAs continuously calculate an exponentially-weighted moving average +// based on an outside source of clock ticks. +type EWMA interface { + Rate() float64 + Snapshot() EWMA + Tick() + Update(int64) +} + +// NewEWMA constructs a new EWMA with the given alpha. +func NewEWMA(alpha float64) EWMA { + if UseNilMetrics { + return NilEWMA{} + } + return &StandardEWMA{alpha: alpha} +} + +// NewEWMA1 constructs a new EWMA for a one-minute moving average. +func NewEWMA1() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/1)) +} + +// NewEWMA5 constructs a new EWMA for a five-minute moving average. +func NewEWMA5() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/5)) +} + +// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. +func NewEWMA15() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/15)) +} + +// EWMASnapshot is a read-only copy of another EWMA. +type EWMASnapshot float64 + +// Rate returns the rate of events per second at the time the snapshot was +// taken. +func (a EWMASnapshot) Rate() float64 { return float64(a) } + +// Snapshot returns the snapshot. +func (a EWMASnapshot) Snapshot() EWMA { return a } + +// Tick panics. +func (EWMASnapshot) Tick() { + panic("Tick called on an EWMASnapshot") +} + +// Update panics. +func (EWMASnapshot) Update(int64) { + panic("Update called on an EWMASnapshot") +} + +// NilEWMA is a no-op EWMA. +type NilEWMA struct{} + +// Rate is a no-op. +func (NilEWMA) Rate() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } + +// Tick is a no-op. +func (NilEWMA) Tick() {} + +// Update is a no-op. +func (NilEWMA) Update(n int64) {} + +// StandardEWMA is the standard implementation of an EWMA and tracks the number +// of uncounted events and processes them on each tick. It uses the +// sync/atomic package to manage uncounted events. +type StandardEWMA struct { + uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment + alpha float64 + rate float64 + init bool + mutex sync.Mutex +} + +// Rate returns the moving average rate of events per second. +func (a *StandardEWMA) Rate() float64 { + a.mutex.Lock() + defer a.mutex.Unlock() + return a.rate * float64(1e9) +} + +// Snapshot returns a read-only copy of the EWMA. 
+func (a *StandardEWMA) Snapshot() EWMA { + return EWMASnapshot(a.Rate()) +} + +// Tick ticks the clock to update the moving average. It assumes it is called +// every five seconds. +func (a *StandardEWMA) Tick() { + count := atomic.LoadInt64(&a.uncounted) + atomic.AddInt64(&a.uncounted, -count) + instantRate := float64(count) / float64(5e9) + a.mutex.Lock() + defer a.mutex.Unlock() + if a.init { + a.rate += a.alpha * (instantRate - a.rate) + } else { + a.init = true + a.rate = instantRate + } +} + +// Update adds n uncounted events. +func (a *StandardEWMA) Update(n int64) { + atomic.AddInt64(&a.uncounted, n) +} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go new file mode 100644 index 0000000..cb57a93 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/gauge.go @@ -0,0 +1,120 @@ +package metrics + +import "sync/atomic" + +// Gauges hold an int64 value that can be set arbitrarily. +type Gauge interface { + Snapshot() Gauge + Update(int64) + Value() int64 +} + +// GetOrRegisterGauge returns an existing Gauge or constructs and registers a +// new StandardGauge. +func GetOrRegisterGauge(name string, r Registry) Gauge { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewGauge).(Gauge) +} + +// NewGauge constructs a new StandardGauge. +func NewGauge() Gauge { + if UseNilMetrics { + return NilGauge{} + } + return &StandardGauge{0} +} + +// NewRegisteredGauge constructs and registers a new StandardGauge. +func NewRegisteredGauge(name string, r Registry) Gauge { + c := NewGauge() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewFunctionalGauge constructs a new FunctionalGauge. +func NewFunctionalGauge(f func() int64) Gauge { + if UseNilMetrics { + return NilGauge{} + } + return &FunctionalGauge{value: f} +} + +// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. +func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { + c := NewFunctionalGauge(f) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// GaugeSnapshot is a read-only copy of another Gauge. +type GaugeSnapshot int64 + +// Snapshot returns the snapshot. +func (g GaugeSnapshot) Snapshot() Gauge { return g } + +// Update panics. +func (GaugeSnapshot) Update(int64) { + panic("Update called on a GaugeSnapshot") +} + +// Value returns the value at the time the snapshot was taken. +func (g GaugeSnapshot) Value() int64 { return int64(g) } + +// NilGauge is a no-op Gauge. +type NilGauge struct{} + +// Snapshot is a no-op. +func (NilGauge) Snapshot() Gauge { return NilGauge{} } + +// Update is a no-op. +func (NilGauge) Update(v int64) {} + +// Value is a no-op. +func (NilGauge) Value() int64 { return 0 } + +// StandardGauge is the standard implementation of a Gauge and uses the +// sync/atomic package to manage a single int64 value. +type StandardGauge struct { + value int64 +} + +// Snapshot returns a read-only copy of the gauge. +func (g *StandardGauge) Snapshot() Gauge { + return GaugeSnapshot(g.Value()) +} + +// Update updates the gauge's value. +func (g *StandardGauge) Update(v int64) { + atomic.StoreInt64(&g.value, v) +} + +// Value returns the gauge's current value. +func (g *StandardGauge) Value() int64 { + return atomic.LoadInt64(&g.value) +} + +// FunctionalGauge returns value from given function +type FunctionalGauge struct { + value func() int64 +} + +// Value returns the gauge's current value. 
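+// It is recomputed on every call by the function passed to NewFunctionalGauge,
+// e.g. NewFunctionalGauge(func() int64 { return int64(runtime.NumGoroutine()) }).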
+func (g FunctionalGauge) Value() int64 { + return g.value() +} + +// Snapshot returns the snapshot. +func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } + +// Update panics. +func (FunctionalGauge) Update(int64) { + panic("Update called on a FunctionalGauge") +} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go new file mode 100644 index 0000000..6f93920 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go @@ -0,0 +1,127 @@ +package metrics + +import "sync" + +// GaugeFloat64s hold a float64 value that can be set arbitrarily. +type GaugeFloat64 interface { + Snapshot() GaugeFloat64 + Update(float64) + Value() float64 +} + +// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a +// new StandardGaugeFloat64. +func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) +} + +// NewGaugeFloat64 constructs a new StandardGaugeFloat64. +func NewGaugeFloat64() GaugeFloat64 { + if UseNilMetrics { + return NilGaugeFloat64{} + } + return &StandardGaugeFloat64{ + value: 0.0, + } +} + +// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. +func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { + c := NewGaugeFloat64() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewFunctionalGauge constructs a new FunctionalGauge. +func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { + if UseNilMetrics { + return NilGaugeFloat64{} + } + return &FunctionalGaugeFloat64{value: f} +} + +// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. +func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { + c := NewFunctionalGaugeFloat64(f) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. +type GaugeFloat64Snapshot float64 + +// Snapshot returns the snapshot. +func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } + +// Update panics. +func (GaugeFloat64Snapshot) Update(float64) { + panic("Update called on a GaugeFloat64Snapshot") +} + +// Value returns the value at the time the snapshot was taken. +func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } + +// NilGauge is a no-op Gauge. +type NilGaugeFloat64 struct{} + +// Snapshot is a no-op. +func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } + +// Update is a no-op. +func (NilGaugeFloat64) Update(v float64) {} + +// Value is a no-op. +func (NilGaugeFloat64) Value() float64 { return 0.0 } + +// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses +// sync.Mutex to manage a single float64 value. +type StandardGaugeFloat64 struct { + mutex sync.Mutex + value float64 +} + +// Snapshot returns a read-only copy of the gauge. +func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { + return GaugeFloat64Snapshot(g.Value()) +} + +// Update updates the gauge's value. +func (g *StandardGaugeFloat64) Update(v float64) { + g.mutex.Lock() + defer g.mutex.Unlock() + g.value = v +} + +// Value returns the gauge's current value. 
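+// Reads share the gauge's mutex with Update: sync/atomic offers no float64
+// operations, so this implementation locks where StandardGauge uses an atomic int64.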
+func (g *StandardGaugeFloat64) Value() float64 { + g.mutex.Lock() + defer g.mutex.Unlock() + return g.value +} + +// FunctionalGaugeFloat64 returns value from given function +type FunctionalGaugeFloat64 struct { + value func() float64 +} + +// Value returns the gauge's current value. +func (g FunctionalGaugeFloat64) Value() float64 { + return g.value() +} + +// Snapshot returns the snapshot. +func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) } + +// Update panics. +func (FunctionalGaugeFloat64) Update(float64) { + panic("Update called on a FunctionalGaugeFloat64") +} diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go new file mode 100644 index 0000000..abd0a7d --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/graphite.go @@ -0,0 +1,113 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "strconv" + "strings" + "time" +) + +// GraphiteConfig provides a container with configuration parameters for +// the Graphite exporter +type GraphiteConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names + Percentiles []float64 // Percentiles to export from timers and histograms +} + +// Graphite is a blocking exporter function which reports metrics in r +// to a graphite server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + GraphiteWithConfig(GraphiteConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, + }) +} + +// GraphiteWithConfig is a blocking exporter function just like Graphite, +// but it takes a GraphiteConfig instead. +func GraphiteWithConfig(c GraphiteConfig) { + log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + for _ = range time.Tick(c.FlushInterval) { + if err := graphite(&c); nil != err { + log.Println(err) + } + } +} + +// GraphiteOnce performs a single submission to Graphite, returning a +// non-nil error on failed connections. This can be used in a loop +// similar to GraphiteWithConfig for custom error handling. +func GraphiteOnce(c GraphiteConfig) error { + log.Printf("WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + return graphite(&c) +} + +func graphite(c *GraphiteConfig) error { + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case Gauge: + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + case GaugeFloat64: + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go new file mode 100644 index 0000000..445131c --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go @@ -0,0 +1,61 @@ +package metrics + +// Healthchecks hold an error value describing an arbitrary up/down status. +type Healthcheck interface { + Check() + Error() error + Healthy() + Unhealthy(error) +} + +// NewHealthcheck constructs a new Healthcheck which will use the given +// function to update its status. 
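+//
+// A sketch of a typical check function; pingDatabase stands in for whatever
+// probe the caller actually runs:
+//
+//	hc := NewHealthcheck(func(h Healthcheck) {
+//		if err := pingDatabase(); err != nil {
+//			h.Unhealthy(err)
+//		} else {
+//			h.Healthy()
+//		}
+//	})
+//	hc.Check() // runs the function and records the result in Error()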
+func NewHealthcheck(f func(Healthcheck)) Healthcheck { + if UseNilMetrics { + return NilHealthcheck{} + } + return &StandardHealthcheck{nil, f} +} + +// NilHealthcheck is a no-op. +type NilHealthcheck struct{} + +// Check is a no-op. +func (NilHealthcheck) Check() {} + +// Error is a no-op. +func (NilHealthcheck) Error() error { return nil } + +// Healthy is a no-op. +func (NilHealthcheck) Healthy() {} + +// Unhealthy is a no-op. +func (NilHealthcheck) Unhealthy(error) {} + +// StandardHealthcheck is the standard implementation of a Healthcheck and +// stores the status and a function to call to update the status. +type StandardHealthcheck struct { + err error + f func(Healthcheck) +} + +// Check runs the healthcheck function to update the healthcheck's status. +func (h *StandardHealthcheck) Check() { + h.f(h) +} + +// Error returns the healthcheck's status, which will be nil if it is healthy. +func (h *StandardHealthcheck) Error() error { + return h.err +} + +// Healthy marks the healthcheck as healthy. +func (h *StandardHealthcheck) Healthy() { + h.err = nil +} + +// Unhealthy marks the healthcheck as unhealthy. The error is stored and +// may be retrieved by the Error method. +func (h *StandardHealthcheck) Unhealthy(err error) { + h.err = err +} diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go new file mode 100644 index 0000000..dbc837f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/histogram.go @@ -0,0 +1,202 @@ +package metrics + +// Histograms calculate distribution statistics from a series of int64 values. +type Histogram interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Sample() Sample + Snapshot() Histogram + StdDev() float64 + Sum() int64 + Update(int64) + Variance() float64 +} + +// GetOrRegisterHistogram returns an existing Histogram or constructs and +// registers a new StandardHistogram. +func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) +} + +// NewHistogram constructs a new StandardHistogram from a Sample. +func NewHistogram(s Sample) Histogram { + if UseNilMetrics { + return NilHistogram{} + } + return &StandardHistogram{sample: s} +} + +// NewRegisteredHistogram constructs and registers a new StandardHistogram from +// a Sample. +func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { + c := NewHistogram(s) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// HistogramSnapshot is a read-only copy of another Histogram. +type HistogramSnapshot struct { + sample *SampleSnapshot +} + +// Clear panics. +func (*HistogramSnapshot) Clear() { + panic("Clear called on a HistogramSnapshot") +} + +// Count returns the number of samples recorded at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample at the time the snapshot +// was taken. +func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample at the time the snapshot was +// taken. 
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the sample +// at the time the snapshot was taken. +func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *HistogramSnapshot) Sample() Sample { return h.sample } + +// Snapshot returns the snapshot. +func (h *HistogramSnapshot) Snapshot() Histogram { return h } + +// StdDev returns the standard deviation of the values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample at the time the snapshot was taken. +func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } + +// Update panics. +func (*HistogramSnapshot) Update(int64) { + panic("Update called on a HistogramSnapshot") +} + +// Variance returns the variance of inputs at the time the snapshot was taken. +func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } + +// NilHistogram is a no-op Histogram. +type NilHistogram struct{} + +// Clear is a no-op. +func (NilHistogram) Clear() {} + +// Count is a no-op. +func (NilHistogram) Count() int64 { return 0 } + +// Max is a no-op. +func (NilHistogram) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilHistogram) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilHistogram) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilHistogram) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilHistogram) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Sample is a no-op. +func (NilHistogram) Sample() Sample { return NilSample{} } + +// Snapshot is a no-op. +func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } + +// StdDev is a no-op. +func (NilHistogram) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilHistogram) Sum() int64 { return 0 } + +// Update is a no-op. +func (NilHistogram) Update(v int64) {} + +// Variance is a no-op. +func (NilHistogram) Variance() float64 { return 0.0 } + +// StandardHistogram is the standard implementation of a Histogram and uses a +// Sample to bound its memory use. +type StandardHistogram struct { + sample Sample +} + +// Clear clears the histogram and its sample. +func (h *StandardHistogram) Clear() { h.sample.Clear() } + +// Count returns the number of samples recorded since the histogram was last +// cleared. +func (h *StandardHistogram) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample. +func (h *StandardHistogram) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample. +func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample. +func (h *StandardHistogram) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of the values in the sample. +func (h *StandardHistogram) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. 
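+// The result is parallel to the input: Percentiles([]float64{0.5, 0.95, 0.99})
+// yields the median, 95th and 99th percentile values in that order.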
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *StandardHistogram) Sample() Sample { return h.sample } + +// Snapshot returns a read-only copy of the histogram. +func (h *StandardHistogram) Snapshot() Histogram { + return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} +} + +// StdDev returns the standard deviation of the values in the sample. +func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample. +func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } + +// Update samples a new value. +func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } + +// Variance returns the variance of the values in the sample. +func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go new file mode 100644 index 0000000..174b947 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/json.go @@ -0,0 +1,31 @@ +package metrics + +import ( + "encoding/json" + "io" + "time" +) + +// MarshalJSON returns a byte slice containing a JSON representation of all +// the metrics in the Registry. +func (r *StandardRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(r.GetAll()) +} + +// WriteJSON writes metrics from the given registry periodically to the +// specified io.Writer as JSON. +func WriteJSON(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteJSONOnce(r, w) + } +} + +// WriteJSONOnce writes metrics from the given registry to the specified +// io.Writer as JSON. +func WriteJSONOnce(r Registry, w io.Writer) { + json.NewEncoder(w).Encode(r) +} + +func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(p.GetAll()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go new file mode 100644 index 0000000..f8074c0 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/log.go @@ -0,0 +1,80 @@ +package metrics + +import ( + "time" +) + +type Logger interface { + Printf(format string, v ...interface{}) +} + +func Log(r Registry, freq time.Duration, l Logger) { + LogScaled(r, freq, time.Nanosecond, l) +} + +// Output each metric in the given registry periodically using the given +// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. 
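+//
+// For example, to report every minute with durations shown in milliseconds:
+//
+//	go LogScaled(DefaultRegistry, time.Minute, time.Millisecond,
+//		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))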
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { + du := float64(scale) + duSuffix := scale.String()[1:] + + for _ = range time.Tick(freq) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + l.Printf("counter %s\n", name) + l.Printf(" count: %9d\n", metric.Count()) + case Gauge: + l.Printf("gauge %s\n", name) + l.Printf(" value: %9d\n", metric.Value()) + case GaugeFloat64: + l.Printf("gauge %s\n", name) + l.Printf(" value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + l.Printf("healthcheck %s\n", name) + l.Printf(" error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("histogram %s\n", name) + l.Printf(" count: %9d\n", h.Count()) + l.Printf(" min: %9d\n", h.Min()) + l.Printf(" max: %9d\n", h.Max()) + l.Printf(" mean: %12.2f\n", h.Mean()) + l.Printf(" stddev: %12.2f\n", h.StdDev()) + l.Printf(" median: %12.2f\n", ps[0]) + l.Printf(" 75%%: %12.2f\n", ps[1]) + l.Printf(" 95%%: %12.2f\n", ps[2]) + l.Printf(" 99%%: %12.2f\n", ps[3]) + l.Printf(" 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + l.Printf("meter %s\n", name) + l.Printf(" count: %9d\n", m.Count()) + l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) + l.Printf(" mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("timer %s\n", name) + l.Printf(" count: %9d\n", t.Count()) + l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) + l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) + l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) + l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) + l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) + l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) + l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) + l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) + l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) + l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) + l.Printf(" mean rate: %12.2f\n", t.RateMean()) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md new file mode 100644 index 0000000..47454f5 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/memory.md @@ -0,0 +1,285 @@ +Memory usage +============ + +(Highly unscientific.) 
+ +Command used to gather static memory usage: + +```sh +grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" +``` + +Program used to gather baseline memory usage: + +```go +package main + +import "time" + +func main() { + time.Sleep(600e9) +} +``` + +Baseline +-------- + +``` +VmPeak: 42604 kB +VmSize: 42604 kB +VmLck: 0 kB +VmHWM: 1120 kB +VmRSS: 1120 kB +VmData: 35460 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 36 kB +VmSwap: 0 kB +``` + +Program used to gather metric memory usage (with other metrics being similar): + +```go +package main + +import ( + "fmt" + "metrics" + "time" +) + +func main() { + fmt.Sprintf("foo") + metrics.NewRegistry() + time.Sleep(600e9) +} +``` + +1000 counters registered +------------------------ + +``` +VmPeak: 44016 kB +VmSize: 44016 kB +VmLck: 0 kB +VmHWM: 1928 kB +VmRSS: 1928 kB +VmData: 36868 kB +VmStk: 136 kB +VmExe: 1024 kB +VmLib: 1848 kB +VmPTE: 40 kB +VmSwap: 0 kB +``` + +**1.412 kB virtual, TODO 0.808 kB resident per counter.** + +100000 counters registered +-------------------------- + +``` +VmPeak: 55024 kB +VmSize: 55024 kB +VmLck: 0 kB +VmHWM: 12440 kB +VmRSS: 12440 kB +VmData: 47876 kB +VmStk: 136 kB +VmExe: 1024 kB +VmLib: 1848 kB +VmPTE: 64 kB +VmSwap: 0 kB +``` + +**0.1242 kB virtual, 0.1132 kB resident per counter.** + +1000 gauges registered +---------------------- + +``` +VmPeak: 44012 kB +VmSize: 44012 kB +VmLck: 0 kB +VmHWM: 1928 kB +VmRSS: 1928 kB +VmData: 36868 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 40 kB +VmSwap: 0 kB +``` + +**1.408 kB virtual, 0.808 kB resident per counter.** + +100000 gauges registered +------------------------ + +``` +VmPeak: 55020 kB +VmSize: 55020 kB +VmLck: 0 kB +VmHWM: 12432 kB +VmRSS: 12432 kB +VmData: 47876 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 60 kB +VmSwap: 0 kB +``` + +**0.12416 kB virtual, 0.11312 resident per gauge.** + +1000 histograms with a uniform sample size of 1028 +-------------------------------------------------- + +``` +VmPeak: 72272 kB +VmSize: 72272 kB +VmLck: 0 kB +VmHWM: 16204 kB +VmRSS: 16204 kB +VmData: 65100 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 80 kB +VmSwap: 0 kB +``` + +**29.668 kB virtual, TODO 15.084 resident per histogram.** + +10000 histograms with a uniform sample size of 1028 +--------------------------------------------------- + +``` +VmPeak: 256912 kB +VmSize: 256912 kB +VmLck: 0 kB +VmHWM: 146204 kB +VmRSS: 146204 kB +VmData: 249740 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 448 kB +VmSwap: 0 kB +``` + +**21.4308 kB virtual, 14.5084 kB resident per histogram.** + +50000 histograms with a uniform sample size of 1028 +--------------------------------------------------- + +``` +VmPeak: 908112 kB +VmSize: 908112 kB +VmLck: 0 kB +VmHWM: 645832 kB +VmRSS: 645588 kB +VmData: 900940 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 1716 kB +VmSwap: 1544 kB +``` + +**17.31016 kB virtual, 12.88936 kB resident per histogram.** + +1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 +------------------------------------------------------------------------------------- + +``` +VmPeak: 62480 kB +VmSize: 62480 kB +VmLck: 0 kB +VmHWM: 11572 kB +VmRSS: 11572 kB +VmData: 55308 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 64 kB +VmSwap: 0 kB +``` + +**19.876 kB virtual, 10.452 kB resident per histogram.** + +10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
+-------------------------------------------------------------------------------------- + +``` +VmPeak: 153296 kB +VmSize: 153296 kB +VmLck: 0 kB +VmHWM: 101176 kB +VmRSS: 101176 kB +VmData: 146124 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 240 kB +VmSwap: 0 kB +``` + +**11.0692 kB virtual, 10.0056 kB resident per histogram.** + +50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 +-------------------------------------------------------------------------------------- + +``` +VmPeak: 557264 kB +VmSize: 557264 kB +VmLck: 0 kB +VmHWM: 501056 kB +VmRSS: 501056 kB +VmData: 550092 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 1032 kB +VmSwap: 0 kB +``` + +**10.2932 kB virtual, 9.99872 kB resident per histogram.** + +1000 meters +----------- + +``` +VmPeak: 74504 kB +VmSize: 74504 kB +VmLck: 0 kB +VmHWM: 24124 kB +VmRSS: 24124 kB +VmData: 67340 kB +VmStk: 136 kB +VmExe: 1040 kB +VmLib: 1848 kB +VmPTE: 92 kB +VmSwap: 0 kB +``` + +**31.9 kB virtual, 23.004 kB resident per meter.** + +10000 meters +------------ + +``` +VmPeak: 278920 kB +VmSize: 278920 kB +VmLck: 0 kB +VmHWM: 227300 kB +VmRSS: 227300 kB +VmData: 271756 kB +VmStk: 136 kB +VmExe: 1040 kB +VmLib: 1848 kB +VmPTE: 488 kB +VmSwap: 0 kB +``` + +**23.6316 kB virtual, 22.618 kB resident per meter.** diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go new file mode 100644 index 0000000..53ff329 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/meter.go @@ -0,0 +1,264 @@ +package metrics + +import ( + "sync" + "time" +) + +// Meters count events to produce exponentially-weighted moving average rates +// at one-, five-, and fifteen-minutes and a mean rate. +type Meter interface { + Count() int64 + Mark(int64) + Rate1() float64 + Rate5() float64 + Rate15() float64 + RateMean() float64 + Snapshot() Meter + Stop() +} + +// GetOrRegisterMeter returns an existing Meter or constructs and registers a +// new StandardMeter. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func GetOrRegisterMeter(name string, r Registry) Meter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewMeter).(Meter) +} + +// NewMeter constructs a new StandardMeter and launches a goroutine. +// Be sure to call Stop() once the meter is of no use to allow for garbage collection. +func NewMeter() Meter { + if UseNilMetrics { + return NilMeter{} + } + m := newStandardMeter() + arbiter.Lock() + defer arbiter.Unlock() + arbiter.meters[m] = struct{}{} + if !arbiter.started { + arbiter.started = true + go arbiter.tick() + } + return m +} + +// NewMeter constructs and registers a new StandardMeter and launches a +// goroutine. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func NewRegisteredMeter(name string, r Registry) Meter { + c := NewMeter() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// MeterSnapshot is a read-only copy of another Meter. +type MeterSnapshot struct { + count int64 + rate1, rate5, rate15, rateMean float64 +} + +// Count returns the count of events at the time the snapshot was taken. +func (m *MeterSnapshot) Count() int64 { return m.count } + +// Mark panics. 
+func (*MeterSnapshot) Mark(n int64) { + panic("Mark called on a MeterSnapshot") +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. +func (m *MeterSnapshot) Rate5() float64 { return m.rate5 } + +// Rate15 returns the fifteen-minute moving average rate of events per second +// at the time the snapshot was taken. +func (m *MeterSnapshot) Rate15() float64 { return m.rate15 } + +// RateMean returns the meter's mean rate of events per second at the time the +// snapshot was taken. +func (m *MeterSnapshot) RateMean() float64 { return m.rateMean } + +// Snapshot returns the snapshot. +func (m *MeterSnapshot) Snapshot() Meter { return m } + +// Stop is a no-op. +func (m *MeterSnapshot) Stop() {} + +// NilMeter is a no-op Meter. +type NilMeter struct{} + +// Count is a no-op. +func (NilMeter) Count() int64 { return 0 } + +// Mark is a no-op. +func (NilMeter) Mark(n int64) {} + +// Rate1 is a no-op. +func (NilMeter) Rate1() float64 { return 0.0 } + +// Rate5 is a no-op. +func (NilMeter) Rate5() float64 { return 0.0 } + +// Rate15is a no-op. +func (NilMeter) Rate15() float64 { return 0.0 } + +// RateMean is a no-op. +func (NilMeter) RateMean() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilMeter) Snapshot() Meter { return NilMeter{} } + +// Stop is a no-op. +func (NilMeter) Stop() {} + +// StandardMeter is the standard implementation of a Meter. +type StandardMeter struct { + lock sync.RWMutex + snapshot *MeterSnapshot + a1, a5, a15 EWMA + startTime time.Time + stopped bool +} + +func newStandardMeter() *StandardMeter { + return &StandardMeter{ + snapshot: &MeterSnapshot{}, + a1: NewEWMA1(), + a5: NewEWMA5(), + a15: NewEWMA15(), + startTime: time.Now(), + } +} + +// Stop stops the meter, Mark() will be a no-op if you use it after being stopped. +func (m *StandardMeter) Stop() { + m.lock.Lock() + stopped := m.stopped + m.stopped = true + m.lock.Unlock() + if !stopped { + arbiter.Lock() + delete(arbiter.meters, m) + arbiter.Unlock() + } +} + +// Count returns the number of events recorded. +func (m *StandardMeter) Count() int64 { + m.lock.RLock() + count := m.snapshot.count + m.lock.RUnlock() + return count +} + +// Mark records the occurance of n events. +func (m *StandardMeter) Mark(n int64) { + m.lock.Lock() + defer m.lock.Unlock() + if m.stopped { + return + } + m.snapshot.count += n + m.a1.Update(n) + m.a5.Update(n) + m.a15.Update(n) + m.updateSnapshot() +} + +// Rate1 returns the one-minute moving average rate of events per second. +func (m *StandardMeter) Rate1() float64 { + m.lock.RLock() + rate1 := m.snapshot.rate1 + m.lock.RUnlock() + return rate1 +} + +// Rate5 returns the five-minute moving average rate of events per second. +func (m *StandardMeter) Rate5() float64 { + m.lock.RLock() + rate5 := m.snapshot.rate5 + m.lock.RUnlock() + return rate5 +} + +// Rate15 returns the fifteen-minute moving average rate of events per second. +func (m *StandardMeter) Rate15() float64 { + m.lock.RLock() + rate15 := m.snapshot.rate15 + m.lock.RUnlock() + return rate15 +} + +// RateMean returns the meter's mean rate of events per second. +func (m *StandardMeter) RateMean() float64 { + m.lock.RLock() + rateMean := m.snapshot.rateMean + m.lock.RUnlock() + return rateMean +} + +// Snapshot returns a read-only copy of the meter. 
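+// The returned copy is detached from the live meter: its count and rates do
+// not change as further events are marked, and it can be read without locking.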
+func (m *StandardMeter) Snapshot() Meter { + m.lock.RLock() + snapshot := *m.snapshot + m.lock.RUnlock() + return &snapshot +} + +func (m *StandardMeter) updateSnapshot() { + // should run with write lock held on m.lock + snapshot := m.snapshot + snapshot.rate1 = m.a1.Rate() + snapshot.rate5 = m.a5.Rate() + snapshot.rate15 = m.a15.Rate() + snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() +} + +func (m *StandardMeter) tick() { + m.lock.Lock() + defer m.lock.Unlock() + m.a1.Tick() + m.a5.Tick() + m.a15.Tick() + m.updateSnapshot() +} + +// meterArbiter ticks meters every 5s from a single goroutine. +// meters are references in a set for future stopping. +type meterArbiter struct { + sync.RWMutex + started bool + meters map[*StandardMeter]struct{} + ticker *time.Ticker +} + +var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})} + +// Ticks meters on the scheduled interval +func (ma *meterArbiter) tick() { + for { + select { + case <-ma.ticker.C: + ma.tickMeters() + } + } +} + +func (ma *meterArbiter) tickMeters() { + ma.RLock() + defer ma.RUnlock() + for meter := range ma.meters { + meter.tick() + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go new file mode 100644 index 0000000..b97a49e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/metrics.go @@ -0,0 +1,13 @@ +// Go port of Coda Hale's Metrics library +// +// +// +// Coda Hale's original work: +package metrics + +// UseNilMetrics is checked by the constructor functions for all of the +// standard metrics. If it is true, the metric returned is a stub. +// +// This global kill-switch helps quantify the observer effect and makes +// for less cluttered pprof profiles. +var UseNilMetrics bool = false diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go new file mode 100644 index 0000000..266b6c9 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go @@ -0,0 +1,119 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "os" + "strings" + "time" +) + +var shortHostName string = "" + +// OpenTSDBConfig provides a container with configuration parameters for +// the OpenTSDB exporter +type OpenTSDBConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names +} + +// OpenTSDB is a blocking exporter function which reports metrics in r +// to a TSDB server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + OpenTSDBWithConfig(OpenTSDBConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + }) +} + +// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, +// but it takes a OpenTSDBConfig instead. 
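OpenTSDB and OpenTSDBWithConfig (whose body follows) block on a time.Tick loop, so they are normally launched in their own goroutine. A sketch of that wiring; the address "localhost:4242", the 10s flush interval, and the "nozzle" prefix are placeholders:

```
package main

import (
	"log"
	"net"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "localhost:4242") // placeholder TSDB address
	if err != nil {
		log.Fatal(err)
	}

	// OpenTSDB blocks forever, flushing every 10s, so run it in a goroutine.
	go metrics.OpenTSDB(metrics.DefaultRegistry, 10*time.Second, "nozzle", addr)

	// ... the rest of the program records metrics into DefaultRegistry ...
	select {}
}
```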
+func OpenTSDBWithConfig(c OpenTSDBConfig) { + for _ = range time.Tick(c.FlushInterval) { + if err := openTSDB(&c); nil != err { + log.Println(err) + } + } +} + +func getShortHostname() string { + if shortHostName == "" { + host, _ := os.Hostname() + if index := strings.Index(host, "."); index > 0 { + shortHostName = host[:index] + } else { + shortHostName = host + } + } + return shortHostName +} + +func openTSDB(c *OpenTSDBConfig) error { + shortHostname := getShortHostname() + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) + case Gauge: + fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case GaugeFloat64: + fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) + fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname) + fmt.Fprintf(w, 
"put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go new file mode 100644 index 0000000..6c0007b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/registry.go @@ -0,0 +1,354 @@ +package metrics + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// DuplicateMetric is the error returned by Registry.Register when a metric +// already exists. If you mean to Register that metric you must first +// Unregister the existing metric. +type DuplicateMetric string + +func (err DuplicateMetric) Error() string { + return fmt.Sprintf("duplicate metric: %s", string(err)) +} + +// A Registry holds references to a set of metrics by name and can iterate +// over them, calling callback functions provided by the user. +// +// This is an interface so as to encourage other structs to implement +// the Registry API as appropriate. +type Registry interface { + + // Call the given function for each registered metric. + Each(func(string, interface{})) + + // Get the metric by the given name or nil if none is registered. + Get(string) interface{} + + // GetAll metrics in the Registry. + GetAll() map[string]map[string]interface{} + + // Gets an existing metric or registers the given one. + // The interface can be the metric to register if not found in registry, + // or a function returning the metric for lazy instantiation. + GetOrRegister(string, interface{}) interface{} + + // Register the given metric under the given name. + Register(string, interface{}) error + + // Run all registered healthchecks. + RunHealthchecks() + + // Unregister the metric with the given name. + Unregister(string) + + // Unregister all metrics. (Mostly for testing.) + UnregisterAll() +} + +// The standard implementation of a Registry is a mutex-protected map +// of names to metrics. +type StandardRegistry struct { + metrics map[string]interface{} + mutex sync.Mutex +} + +// Create a new registry. +func NewRegistry() Registry { + return &StandardRegistry{metrics: make(map[string]interface{})} +} + +// Call the given function for each registered metric. +func (r *StandardRegistry) Each(f func(string, interface{})) { + for name, i := range r.registered() { + f(name, i) + } +} + +// Get the metric by the given name or nil if none is registered. +func (r *StandardRegistry) Get(name string) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.metrics[name] +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. 
+// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + if metric, ok := r.metrics[name]; ok { + return metric + } + if v := reflect.ValueOf(i); v.Kind() == reflect.Func { + i = v.Call(nil)[0].Interface() + } + r.register(name, i) + return i +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func (r *StandardRegistry) Register(name string, i interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.register(name, i) +} + +// Run all registered healthchecks. +func (r *StandardRegistry) RunHealthchecks() { + r.mutex.Lock() + defer r.mutex.Unlock() + for _, i := range r.metrics { + if h, ok := i.(Healthcheck); ok { + h.Check() + } + } +} + +// GetAll metrics in the Registry +func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { + data := make(map[string]map[string]interface{}) + r.Each(func(name string, i interface{}) { + values := make(map[string]interface{}) + switch metric := i.(type) { + case Counter: + values["count"] = metric.Count() + case Gauge: + values["value"] = metric.Value() + case GaugeFloat64: + values["value"] = metric.Value() + case Healthcheck: + values["error"] = nil + metric.Check() + if err := metric.Error(); nil != err { + values["error"] = metric.Error().Error() + } + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = h.Count() + values["min"] = h.Min() + values["max"] = h.Max() + values["mean"] = h.Mean() + values["stddev"] = h.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + case Meter: + m := metric.Snapshot() + values["count"] = m.Count() + values["1m.rate"] = m.Rate1() + values["5m.rate"] = m.Rate5() + values["15m.rate"] = m.Rate15() + values["mean.rate"] = m.RateMean() + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = t.Count() + values["min"] = t.Min() + values["max"] = t.Max() + values["mean"] = t.Mean() + values["stddev"] = t.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + values["1m.rate"] = t.Rate1() + values["5m.rate"] = t.Rate5() + values["15m.rate"] = t.Rate15() + values["mean.rate"] = t.RateMean() + } + data[name] = values + }) + return data +} + +// Unregister the metric with the given name. +func (r *StandardRegistry) Unregister(name string) { + r.mutex.Lock() + defer r.mutex.Unlock() + r.stop(name) + delete(r.metrics, name) +} + +// Unregister all metrics. (Mostly for testing.) 
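GetAll, defined above, flattens every registered metric into a map of maps, which is convenient for serializing a point-in-time view, for example to JSON. A sketch; the metric name "router.messages" is a placeholder:

```
package main

import (
	"encoding/json"
	"fmt"
	"log"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	m := metrics.NewMeter()
	defer m.Stop()
	if err := r.Register("router.messages", m); err != nil {
		log.Fatal(err)
	}
	m.Mark(3)

	// GetAll returns map[name]map[field]value, e.g.
	// {"router.messages": {"count": 3, "1m.rate": ..., "mean.rate": ...}}
	b, err := json.MarshalIndent(r.GetAll(), "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
```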
+func (r *StandardRegistry) UnregisterAll() { + r.mutex.Lock() + defer r.mutex.Unlock() + for name, _ := range r.metrics { + r.stop(name) + delete(r.metrics, name) + } +} + +func (r *StandardRegistry) register(name string, i interface{}) error { + if _, ok := r.metrics[name]; ok { + return DuplicateMetric(name) + } + switch i.(type) { + case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: + r.metrics[name] = i + } + return nil +} + +func (r *StandardRegistry) registered() map[string]interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + metrics := make(map[string]interface{}, len(r.metrics)) + for name, i := range r.metrics { + metrics[name] = i + } + return metrics +} + +func (r *StandardRegistry) stop(name string) { + if i, ok := r.metrics[name]; ok { + if s, ok := i.(Stoppable); ok { + s.Stop() + } + } +} + +// Stoppable defines the metrics which has to be stopped. +type Stoppable interface { + Stop() +} + +type PrefixedRegistry struct { + underlying Registry + prefix string +} + +func NewPrefixedRegistry(prefix string) Registry { + return &PrefixedRegistry{ + underlying: NewRegistry(), + prefix: prefix, + } +} + +func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { + return &PrefixedRegistry{ + underlying: parent, + prefix: prefix, + } +} + +// Call the given function for each registered metric. +func (r *PrefixedRegistry) Each(fn func(string, interface{})) { + wrappedFn := func(prefix string) func(string, interface{}) { + return func(name string, iface interface{}) { + if strings.HasPrefix(name, prefix) { + fn(name, iface) + } else { + return + } + } + } + + baseRegistry, prefix := findPrefix(r, "") + baseRegistry.Each(wrappedFn(prefix)) +} + +func findPrefix(registry Registry, prefix string) (Registry, string) { + switch r := registry.(type) { + case *PrefixedRegistry: + return findPrefix(r.underlying, r.prefix+prefix) + case *StandardRegistry: + return r, prefix + } + return nil, "" +} + +// Get the metric by the given name or nil if none is registered. +func (r *PrefixedRegistry) Get(name string) interface{} { + realName := r.prefix + name + return r.underlying.Get(realName) +} + +// Gets an existing metric or registers the given one. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { + realName := r.prefix + name + return r.underlying.GetOrRegister(realName, metric) +} + +// Register the given metric under the given name. The name will be prefixed. +func (r *PrefixedRegistry) Register(name string, metric interface{}) error { + realName := r.prefix + name + return r.underlying.Register(realName, metric) +} + +// Run all registered healthchecks. +func (r *PrefixedRegistry) RunHealthchecks() { + r.underlying.RunHealthchecks() +} + +// GetAll metrics in the Registry +func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { + return r.underlying.GetAll() +} + +// Unregister the metric with the given name. The name will be prefixed. +func (r *PrefixedRegistry) Unregister(name string) { + realName := r.prefix + name + r.underlying.Unregister(realName) +} + +// Unregister all metrics. (Mostly for testing.) +func (r *PrefixedRegistry) UnregisterAll() { + r.underlying.UnregisterAll() +} + +var DefaultRegistry Registry = NewRegistry() + +// Call the given function for each registered metric. 
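PrefixedRegistry wraps another registry and prepends its prefix on Register, Get, and Unregister, and its Each only visits names carrying that prefix. A sketch of namespacing with a child registry; the prefix and metric name are placeholders:

```
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	parent := metrics.NewRegistry()
	child := metrics.NewPrefixedChildRegistry(parent, "nozzle.")

	m := metrics.NewMeter()
	defer m.Stop()
	child.Register("dropped", m) // stored in the parent as "nozzle.dropped"

	// The parent sees the fully prefixed name...
	fmt.Println(parent.Get("nozzle.dropped") != nil) // true
	// ...and the child resolves unprefixed names by adding its prefix.
	fmt.Println(child.Get("dropped") != nil) // true

	// Each on the child only visits metrics whose stored names carry the prefix.
	child.Each(func(name string, _ interface{}) {
		fmt.Println(name) // "nozzle.dropped"
	})
}
```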
+func Each(f func(string, interface{})) { + DefaultRegistry.Each(f) +} + +// Get the metric by the given name or nil if none is registered. +func Get(name string) interface{} { + return DefaultRegistry.Get(name) +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +func GetOrRegister(name string, i interface{}) interface{} { + return DefaultRegistry.GetOrRegister(name, i) +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func Register(name string, i interface{}) error { + return DefaultRegistry.Register(name, i) +} + +// Register the given metric under the given name. Panics if a metric by the +// given name is already registered. +func MustRegister(name string, i interface{}) { + if err := Register(name, i); err != nil { + panic(err) + } +} + +// Run all registered healthchecks. +func RunHealthchecks() { + DefaultRegistry.RunHealthchecks() +} + +// Unregister the metric with the given name. +func Unregister(name string) { + DefaultRegistry.Unregister(name) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go new file mode 100644 index 0000000..11c6b78 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime.go @@ -0,0 +1,212 @@ +package metrics + +import ( + "runtime" + "runtime/pprof" + "time" +) + +var ( + memStats runtime.MemStats + runtimeMetrics struct { + MemStats struct { + Alloc Gauge + BuckHashSys Gauge + DebugGC Gauge + EnableGC Gauge + Frees Gauge + HeapAlloc Gauge + HeapIdle Gauge + HeapInuse Gauge + HeapObjects Gauge + HeapReleased Gauge + HeapSys Gauge + LastGC Gauge + Lookups Gauge + Mallocs Gauge + MCacheInuse Gauge + MCacheSys Gauge + MSpanInuse Gauge + MSpanSys Gauge + NextGC Gauge + NumGC Gauge + GCCPUFraction GaugeFloat64 + PauseNs Histogram + PauseTotalNs Gauge + StackInuse Gauge + StackSys Gauge + Sys Gauge + TotalAlloc Gauge + } + NumCgoCall Gauge + NumGoroutine Gauge + NumThread Gauge + ReadMemStats Timer + } + frees uint64 + lookups uint64 + mallocs uint64 + numGC uint32 + numCgoCalls int64 + + threadCreateProfile = pprof.Lookup("threadcreate") +) + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called as a goroutine. +func CaptureRuntimeMemStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureRuntimeMemStatsOnce(r) + } +} + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called in a background +// goroutine. Giving a registry which has not been given to +// RegisterRuntimeMemStats will panic. +// +// Be very careful with this because runtime.ReadMemStats calls the C +// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() +// and that last one does what it says on the tin. +func CaptureRuntimeMemStatsOnce(r Registry) { + t := time.Now() + runtime.ReadMemStats(&memStats) // This takes 50-200us. 
+ runtimeMetrics.ReadMemStats.UpdateSince(t) + + runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) + runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) + if memStats.DebugGC { + runtimeMetrics.MemStats.DebugGC.Update(1) + } else { + runtimeMetrics.MemStats.DebugGC.Update(0) + } + if memStats.EnableGC { + runtimeMetrics.MemStats.EnableGC.Update(1) + } else { + runtimeMetrics.MemStats.EnableGC.Update(0) + } + + runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) + runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) + runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) + runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) + runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) + runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) + runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) + runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) + runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) + runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) + runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) + runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) + runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) + runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) + runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) + runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) + runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) + + // + i := numGC % uint32(len(memStats.PauseNs)) + ii := memStats.NumGC % uint32(len(memStats.PauseNs)) + if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { + for i = 0; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } else { + if i > ii { + for ; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + i = 0 + } + for ; i < ii; i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } + frees = memStats.Frees + lookups = memStats.Lookups + mallocs = memStats.Mallocs + numGC = memStats.NumGC + + runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) + runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) + runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) + runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) + runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) + + currentNumCgoCalls := numCgoCall() + runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) + numCgoCalls = currentNumCgoCalls + + runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) + + runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) +} + +// Register runtimeMetrics for the Go runtime statistics exported in runtime and +// specifically runtime.MemStats. The runtimeMetrics are named by their +// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
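Per the doc comments above, the intended wiring is to register the runtime gauges once and then capture in a background goroutine; capturing against a registry that was never passed to RegisterRuntimeMemStats panics, and each capture calls runtime.ReadMemStats, which briefly stops the world. A sketch with an illustrative 5-second interval:

```
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	// Register the runtime.MemStats.* gauges and histograms first...
	metrics.RegisterRuntimeMemStats(r)

	// ...then sample them periodically. CaptureRuntimeMemStats blocks on a
	// ticker, so it runs in its own goroutine.
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	// ... rest of the program ...
	select {}
}
```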
+func RegisterRuntimeMemStats(r Registry) { + runtimeMetrics.MemStats.Alloc = NewGauge() + runtimeMetrics.MemStats.BuckHashSys = NewGauge() + runtimeMetrics.MemStats.DebugGC = NewGauge() + runtimeMetrics.MemStats.EnableGC = NewGauge() + runtimeMetrics.MemStats.Frees = NewGauge() + runtimeMetrics.MemStats.HeapAlloc = NewGauge() + runtimeMetrics.MemStats.HeapIdle = NewGauge() + runtimeMetrics.MemStats.HeapInuse = NewGauge() + runtimeMetrics.MemStats.HeapObjects = NewGauge() + runtimeMetrics.MemStats.HeapReleased = NewGauge() + runtimeMetrics.MemStats.HeapSys = NewGauge() + runtimeMetrics.MemStats.LastGC = NewGauge() + runtimeMetrics.MemStats.Lookups = NewGauge() + runtimeMetrics.MemStats.Mallocs = NewGauge() + runtimeMetrics.MemStats.MCacheInuse = NewGauge() + runtimeMetrics.MemStats.MCacheSys = NewGauge() + runtimeMetrics.MemStats.MSpanInuse = NewGauge() + runtimeMetrics.MemStats.MSpanSys = NewGauge() + runtimeMetrics.MemStats.NextGC = NewGauge() + runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() + runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) + runtimeMetrics.MemStats.PauseTotalNs = NewGauge() + runtimeMetrics.MemStats.StackInuse = NewGauge() + runtimeMetrics.MemStats.StackSys = NewGauge() + runtimeMetrics.MemStats.Sys = NewGauge() + runtimeMetrics.MemStats.TotalAlloc = NewGauge() + runtimeMetrics.NumCgoCall = NewGauge() + runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.NumThread = NewGauge() + runtimeMetrics.ReadMemStats = NewTimer() + + r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) + r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) + r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) + r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) + r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) + r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) + r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) + r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) + r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) + r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) + r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) + r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) + r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) + r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) + r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) + r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) + r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) + r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) + r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) + r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) + r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) + r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) + r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) + r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) + r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) + 
r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) + r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) + r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) + r.Register("runtime.NumThread", runtimeMetrics.NumThread) + r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go new file mode 100644 index 0000000..e3391f4 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go @@ -0,0 +1,10 @@ +// +build cgo +// +build !appengine + +package metrics + +import "runtime" + +func numCgoCall() int64 { + return runtime.NumCgoCall() +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go new file mode 100644 index 0000000..ca12c05 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go @@ -0,0 +1,9 @@ +// +build go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return memStats.GCCPUFraction +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go new file mode 100644 index 0000000..616a3b4 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go @@ -0,0 +1,7 @@ +// +build !cgo appengine + +package metrics + +func numCgoCall() int64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go new file mode 100644 index 0000000..be96aa6 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go @@ -0,0 +1,9 @@ +// +build !go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go new file mode 100644 index 0000000..fecee5e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/sample.go @@ -0,0 +1,616 @@ +package metrics + +import ( + "math" + "math/rand" + "sort" + "sync" + "time" +) + +const rescaleThreshold = time.Hour + +// Samples maintain a statistically-significant selection of values from +// a stream. +type Sample interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Size() int + Snapshot() Sample + StdDev() float64 + Sum() int64 + Update(int64) + Values() []int64 + Variance() float64 +} + +// ExpDecaySample is an exponentially-decaying sample using a forward-decaying +// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time +// Decay Model for Streaming Systems". +// +// +type ExpDecaySample struct { + alpha float64 + count int64 + mutex sync.Mutex + reservoirSize int + t0, t1 time.Time + values *expDecaySampleHeap +} + +// NewExpDecaySample constructs a new exponentially-decaying sample with the +// given reservoir size and alpha. +func NewExpDecaySample(reservoirSize int, alpha float64) Sample { + if UseNilMetrics { + return NilSample{} + } + s := &ExpDecaySample{ + alpha: alpha, + reservoirSize: reservoirSize, + t0: time.Now(), + values: newExpDecaySampleHeap(reservoirSize), + } + s.t1 = s.t0.Add(rescaleThreshold) + return s +} + +// Clear clears all samples. 
+func (s *ExpDecaySample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.t0 = time.Now() + s.t1 = s.t0.Add(rescaleThreshold) + s.values.Clear() +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *ExpDecaySample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *ExpDecaySample) Max() int64 { + return SampleMax(s.Values()) +} + +// Mean returns the mean of the values in the sample. +func (s *ExpDecaySample) Mean() float64 { + return SampleMean(s.Values()) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *ExpDecaySample) Min() int64 { + return SampleMin(s.Values()) +} + +// Percentile returns an arbitrary percentile of values in the sample. +func (s *ExpDecaySample) Percentile(p float64) float64 { + return SamplePercentile(s.Values(), p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.Values(), ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *ExpDecaySample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.values.Size() +} + +// Snapshot returns a read-only copy of the sample. +func (s *ExpDecaySample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *ExpDecaySample) StdDev() float64 { + return SampleStdDev(s.Values()) +} + +// Sum returns the sum of the values in the sample. +func (s *ExpDecaySample) Sum() int64 { + return SampleSum(s.Values()) +} + +// Update samples a new value. +func (s *ExpDecaySample) Update(v int64) { + s.update(time.Now(), v) +} + +// Values returns a copy of the values in the sample. +func (s *ExpDecaySample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return values +} + +// Variance returns the variance of the values in the sample. +func (s *ExpDecaySample) Variance() float64 { + return SampleVariance(s.Values()) +} + +// update samples a new value at a particular timestamp. This is a method all +// its own to facilitate testing. +func (s *ExpDecaySample) update(t time.Time, v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if s.values.Size() == s.reservoirSize { + s.values.Pop() + } + s.values.Push(expDecaySample{ + k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), + v: v, + }) + if t.After(s.t1) { + values := s.values.Values() + t0 := s.t0 + s.values.Clear() + s.t0 = t + s.t1 = s.t0.Add(rescaleThreshold) + for _, v := range values { + v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) + s.values.Push(v) + } + } +} + +// NilSample is a no-op Sample. +type NilSample struct{} + +// Clear is a no-op. +func (NilSample) Clear() {} + +// Count is a no-op. +func (NilSample) Count() int64 { return 0 } + +// Max is a no-op. +func (NilSample) Max() int64 { return 0 } + +// Mean is a no-op. 
+func (NilSample) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilSample) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilSample) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilSample) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Size is a no-op. +func (NilSample) Size() int { return 0 } + +// Sample is a no-op. +func (NilSample) Snapshot() Sample { return NilSample{} } + +// StdDev is a no-op. +func (NilSample) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilSample) Sum() int64 { return 0 } + +// Update is a no-op. +func (NilSample) Update(v int64) {} + +// Values is a no-op. +func (NilSample) Values() []int64 { return []int64{} } + +// Variance is a no-op. +func (NilSample) Variance() float64 { return 0.0 } + +// SampleMax returns the maximum value of the slice of int64. +func SampleMax(values []int64) int64 { + if 0 == len(values) { + return 0 + } + var max int64 = math.MinInt64 + for _, v := range values { + if max < v { + max = v + } + } + return max +} + +// SampleMean returns the mean value of the slice of int64. +func SampleMean(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + return float64(SampleSum(values)) / float64(len(values)) +} + +// SampleMin returns the minimum value of the slice of int64. +func SampleMin(values []int64) int64 { + if 0 == len(values) { + return 0 + } + var min int64 = math.MaxInt64 + for _, v := range values { + if min > v { + min = v + } + } + return min +} + +// SamplePercentiles returns an arbitrary percentile of the slice of int64. +func SamplePercentile(values int64Slice, p float64) float64 { + return SamplePercentiles(values, []float64{p})[0] +} + +// SamplePercentiles returns a slice of arbitrary percentiles of the slice of +// int64. +func SamplePercentiles(values int64Slice, ps []float64) []float64 { + scores := make([]float64, len(ps)) + size := len(values) + if size > 0 { + sort.Sort(values) + for i, p := range ps { + pos := p * float64(size+1) + if pos < 1.0 { + scores[i] = float64(values[0]) + } else if pos >= float64(size) { + scores[i] = float64(values[size-1]) + } else { + lower := float64(values[int(pos)-1]) + upper := float64(values[int(pos)]) + scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) + } + } + } + return scores +} + +// SampleSnapshot is a read-only copy of another Sample. +type SampleSnapshot struct { + count int64 + values []int64 +} + +func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot { + return &SampleSnapshot{ + count: count, + values: values, + } +} + +// Clear panics. +func (*SampleSnapshot) Clear() { + panic("Clear called on a SampleSnapshot") +} + +// Count returns the count of inputs at the time the snapshot was taken. +func (s *SampleSnapshot) Count() int64 { return s.count } + +// Max returns the maximal value at the time the snapshot was taken. +func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } + +// Mean returns the mean value at the time the snapshot was taken. +func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } + +// Min returns the minimal value at the time the snapshot was taken. +func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } + +// Percentile returns an arbitrary percentile of values at the time the +// snapshot was taken. 
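SamplePercentiles above sorts the values and linearly interpolates at position p*(n+1), clamping to the first or last value when that position falls outside the slice. A small check, with the expected results worked out from that formula in the comments:

```
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	values := []int64{1, 2, 3, 4, 5}

	// pos = p*(n+1) with n = 5:
	//   p=0.25 -> pos=1.5 -> 1 + 0.5*(2-1) = 1.5
	//   p=0.50 -> pos=3.0 -> exactly the third value = 3
	//   p=0.95 -> pos=5.7 -> clamped to the last value = 5
	fmt.Println(metrics.SamplePercentiles(values, []float64{0.25, 0.5, 0.95}))
	// [1.5 3 5]
}
```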
+func (s *SampleSnapshot) Percentile(p float64) float64 { + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values at the time +// the snapshot was taken. +func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample at the time the snapshot was taken. +func (s *SampleSnapshot) Size() int { return len(s.values) } + +// Snapshot returns the snapshot. +func (s *SampleSnapshot) Snapshot() Sample { return s } + +// StdDev returns the standard deviation of values at the time the snapshot was +// taken. +func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } + +// Sum returns the sum of values at the time the snapshot was taken. +func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } + +// Update panics. +func (*SampleSnapshot) Update(int64) { + panic("Update called on a SampleSnapshot") +} + +// Values returns a copy of the values in the sample. +func (s *SampleSnapshot) Values() []int64 { + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of values at the time the snapshot was taken. +func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } + +// SampleStdDev returns the standard deviation of the slice of int64. +func SampleStdDev(values []int64) float64 { + return math.Sqrt(SampleVariance(values)) +} + +// SampleSum returns the sum of the slice of int64. +func SampleSum(values []int64) int64 { + var sum int64 + for _, v := range values { + sum += v + } + return sum +} + +// SampleVariance returns the variance of the slice of int64. +func SampleVariance(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + m := SampleMean(values) + var sum float64 + for _, v := range values { + d := float64(v) - m + sum += d * d + } + return sum / float64(len(values)) +} + +// A uniform sample using Vitter's Algorithm R. +// +// +type UniformSample struct { + count int64 + mutex sync.Mutex + reservoirSize int + values []int64 +} + +// NewUniformSample constructs a new uniform sample with the given reservoir +// size. +func NewUniformSample(reservoirSize int) Sample { + if UseNilMetrics { + return NilSample{} + } + return &UniformSample{ + reservoirSize: reservoirSize, + values: make([]int64, 0, reservoirSize), + } +} + +// Clear clears all samples. +func (s *UniformSample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.values = make([]int64, 0, s.reservoirSize) +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *UniformSample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *UniformSample) Max() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMax(s.values) +} + +// Mean returns the mean of the values in the sample. +func (s *UniformSample) Mean() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMean(s.values) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *UniformSample) Min() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMin(s.values) +} + +// Percentile returns an arbitrary percentile of values in the sample. 
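The UniformSample above implements Vitter's Algorithm R: once the reservoir is full, the i-th value replaces a random slot with probability reservoirSize/i, so every value seen has an equal chance of being retained. A usage sketch with an illustrative reservoir size:

```
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Keep an unbiased reservoir of at most 100 of the values seen so far.
	s := metrics.NewUniformSample(100)

	for i := int64(0); i < 100000; i++ {
		s.Update(i)
	}

	fmt.Println(s.Count()) // 100000 values observed
	fmt.Println(s.Size())  // 100 values retained
	fmt.Println(s.Mean())  // roughly 50000, since retention is uniform
}
```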
+func (s *UniformSample) Percentile(p float64) float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *UniformSample) Percentiles(ps []float64) []float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *UniformSample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.values) +} + +// Snapshot returns a read-only copy of the sample. +func (s *UniformSample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *UniformSample) StdDev() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleStdDev(s.values) +} + +// Sum returns the sum of the values in the sample. +func (s *UniformSample) Sum() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleSum(s.values) +} + +// Update samples a new value. +func (s *UniformSample) Update(v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if len(s.values) < s.reservoirSize { + s.values = append(s.values, v) + } else { + r := rand.Int63n(s.count) + if r < int64(len(s.values)) { + s.values[int(r)] = v + } + } +} + +// Values returns a copy of the values in the sample. +func (s *UniformSample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of the values in the sample. +func (s *UniformSample) Variance() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleVariance(s.values) +} + +// expDecaySample represents an individual sample in a heap. +type expDecaySample struct { + k float64 + v int64 +} + +func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { + return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} +} + +// expDecaySampleHeap is a min-heap of expDecaySamples. 
+// The internal implementation is copied from the standard library's container/heap +type expDecaySampleHeap struct { + s []expDecaySample +} + +func (h *expDecaySampleHeap) Clear() { + h.s = h.s[:0] +} + +func (h *expDecaySampleHeap) Push(s expDecaySample) { + n := len(h.s) + h.s = h.s[0 : n+1] + h.s[n] = s + h.up(n) +} + +func (h *expDecaySampleHeap) Pop() expDecaySample { + n := len(h.s) - 1 + h.s[0], h.s[n] = h.s[n], h.s[0] + h.down(0, n) + + n = len(h.s) + s := h.s[n-1] + h.s = h.s[0 : n-1] + return s +} + +func (h *expDecaySampleHeap) Size() int { + return len(h.s) +} + +func (h *expDecaySampleHeap) Values() []expDecaySample { + return h.s +} + +func (h *expDecaySampleHeap) up(j int) { + for { + i := (j - 1) / 2 // parent + if i == j || !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + j = i + } +} + +func (h *expDecaySampleHeap) down(i, n int) { + for { + j1 := 2*i + 1 + if j1 >= n || j1 < 0 { // j1 < 0 after int overflow + break + } + j := j1 // left child + if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { + j = j2 // = 2*i + 2 // right child + } + if !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + i = j + } +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go new file mode 100644 index 0000000..693f190 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/syslog.go @@ -0,0 +1,78 @@ +// +build !windows + +package metrics + +import ( + "fmt" + "log/syslog" + "time" +) + +// Output each metric in the given registry to syslog periodically using +// the given syslogger. 
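The Syslog exporter whose body follows blocks on a ticker, so like the other exporters it is usually launched in a goroutine; note it is build-tagged !windows. A sketch using the standard library's log/syslog; the tag "kafka-nozzle", the facility, and the one-minute interval are placeholders:

```
// +build !windows

package main

import (
	"log"
	"log/syslog"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "kafka-nozzle")
	if err != nil {
		log.Fatal(err)
	}

	// Report every registered metric to syslog once a minute.
	go metrics.Syslog(metrics.DefaultRegistry, time.Minute, w)

	select {}
}
```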
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) { + for _ = range time.Tick(d) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) + case Gauge: + w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) + case GaugeFloat64: + w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) + case Healthcheck: + metric.Check() + w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", + name, + h.Count(), + h.Min(), + h.Max(), + h.Mean(), + h.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + )) + case Meter: + m := metric.Snapshot() + w.Info(fmt.Sprintf( + "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", + name, + m.Count(), + m.Rate1(), + m.Rate5(), + m.Rate15(), + m.RateMean(), + )) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", + name, + t.Count(), + t.Min(), + t.Max(), + t.Mean(), + t.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + t.Rate1(), + t.Rate5(), + t.Rate15(), + t.RateMean(), + )) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go new file mode 100644 index 0000000..d6ec4c6 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/timer.go @@ -0,0 +1,329 @@ +package metrics + +import ( + "sync" + "time" +) + +// Timers capture the duration and rate of events. +type Timer interface { + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Rate1() float64 + Rate5() float64 + Rate15() float64 + RateMean() float64 + Snapshot() Timer + StdDev() float64 + Stop() + Sum() int64 + Time(func()) + Update(time.Duration) + UpdateSince(time.Time) + Variance() float64 +} + +// GetOrRegisterTimer returns an existing Timer or constructs and registers a +// new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func GetOrRegisterTimer(name string, r Registry) Timer { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewTimer).(Timer) +} + +// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. +// Be sure to call Stop() once the timer is of no use to allow for garbage collection. +func NewCustomTimer(h Histogram, m Meter) Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: h, + meter: m, + } +} + +// NewRegisteredTimer constructs and registers a new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func NewRegisteredTimer(name string, r Registry) Timer { + c := NewTimer() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewTimer constructs a new StandardTimer using an exponentially-decaying +// sample with the same reservoir size and alpha as UNIX load averages. 
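Timers pair a histogram with a meter, so a single Update records both a duration sample and a rate event. A sketch using the registry helper defined above; the metric name "kafka.produce" and the sleep durations are illustrative:

```
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// A nil registry means DefaultRegistry; remember to Stop (and, if needed,
	// Unregister) the timer once it is no longer used.
	t := metrics.GetOrRegisterTimer("kafka.produce", nil)
	defer t.Stop()

	// Either time a function call directly...
	t.Time(func() { time.Sleep(10 * time.Millisecond) })

	// ...or record a duration measured elsewhere.
	start := time.Now()
	time.Sleep(5 * time.Millisecond)
	t.UpdateSince(start)

	fmt.Println(t.Count())          // 2 events
	fmt.Println(t.Percentile(0.99)) // 99th-percentile latency, in nanoseconds
	fmt.Println(t.Rate1())          // one-minute rate, events per second
}
```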
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection. +func NewTimer() Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), + meter: NewMeter(), + } +} + +// NilTimer is a no-op Timer. +type NilTimer struct { + h Histogram + m Meter +} + +// Count is a no-op. +func (NilTimer) Count() int64 { return 0 } + +// Max is a no-op. +func (NilTimer) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilTimer) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilTimer) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilTimer) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilTimer) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Rate1 is a no-op. +func (NilTimer) Rate1() float64 { return 0.0 } + +// Rate5 is a no-op. +func (NilTimer) Rate5() float64 { return 0.0 } + +// Rate15 is a no-op. +func (NilTimer) Rate15() float64 { return 0.0 } + +// RateMean is a no-op. +func (NilTimer) RateMean() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilTimer) Snapshot() Timer { return NilTimer{} } + +// StdDev is a no-op. +func (NilTimer) StdDev() float64 { return 0.0 } + +// Stop is a no-op. +func (NilTimer) Stop() {} + +// Sum is a no-op. +func (NilTimer) Sum() int64 { return 0 } + +// Time is a no-op. +func (NilTimer) Time(func()) {} + +// Update is a no-op. +func (NilTimer) Update(time.Duration) {} + +// UpdateSince is a no-op. +func (NilTimer) UpdateSince(time.Time) {} + +// Variance is a no-op. +func (NilTimer) Variance() float64 { return 0.0 } + +// StandardTimer is the standard implementation of a Timer and uses a Histogram +// and Meter. +type StandardTimer struct { + histogram Histogram + meter Meter + mutex sync.Mutex +} + +// Count returns the number of events recorded. +func (t *StandardTimer) Count() int64 { + return t.histogram.Count() +} + +// Max returns the maximum value in the sample. +func (t *StandardTimer) Max() int64 { + return t.histogram.Max() +} + +// Mean returns the mean of the values in the sample. +func (t *StandardTimer) Mean() float64 { + return t.histogram.Mean() +} + +// Min returns the minimum value in the sample. +func (t *StandardTimer) Min() int64 { + return t.histogram.Min() +} + +// Percentile returns an arbitrary percentile of the values in the sample. +func (t *StandardTimer) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (t *StandardTimer) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second. +func (t *StandardTimer) Rate1() float64 { + return t.meter.Rate1() +} + +// Rate5 returns the five-minute moving average rate of events per second. +func (t *StandardTimer) Rate5() float64 { + return t.meter.Rate5() +} + +// Rate15 returns the fifteen-minute moving average rate of events per second. +func (t *StandardTimer) Rate15() float64 { + return t.meter.Rate15() +} + +// RateMean returns the meter's mean rate of events per second. +func (t *StandardTimer) RateMean() float64 { + return t.meter.RateMean() +} + +// Snapshot returns a read-only copy of the timer. 
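NewCustomTimer above lets the caller swap the default sample, for example using an unbiased uniform reservoir instead of the exponentially-decaying one. A sketch, assuming NewHistogram (defined in the library but not in this hunk) wraps the given Sample as it does in NewTimer:

```
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Same shape as NewTimer, but backed by a uniform sample so percentiles
	// reflect the whole run rather than favouring recent values.
	t := metrics.NewCustomTimer(
		metrics.NewHistogram(metrics.NewUniformSample(1028)),
		metrics.NewMeter(),
	)
	defer t.Stop()

	t.Update(25 * time.Millisecond)
	_ = t.Percentile(0.5)
}
```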
+func (t *StandardTimer) Snapshot() Timer { + t.mutex.Lock() + defer t.mutex.Unlock() + return &TimerSnapshot{ + histogram: t.histogram.Snapshot().(*HistogramSnapshot), + meter: t.meter.Snapshot().(*MeterSnapshot), + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (t *StandardTimer) StdDev() float64 { + return t.histogram.StdDev() +} + +// Stop stops the meter. +func (t *StandardTimer) Stop() { + t.meter.Stop() +} + +// Sum returns the sum in the sample. +func (t *StandardTimer) Sum() int64 { + return t.histogram.Sum() +} + +// Record the duration of the execution of the given function. +func (t *StandardTimer) Time(f func()) { + ts := time.Now() + f() + t.Update(time.Since(ts)) +} + +// Record the duration of an event. +func (t *StandardTimer) Update(d time.Duration) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(d)) + t.meter.Mark(1) +} + +// Record the duration of an event that started at a time and ends now. +func (t *StandardTimer) UpdateSince(ts time.Time) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(time.Since(ts))) + t.meter.Mark(1) +} + +// Variance returns the variance of the values in the sample. +func (t *StandardTimer) Variance() float64 { + return t.histogram.Variance() +} + +// TimerSnapshot is a read-only copy of another Timer. +type TimerSnapshot struct { + histogram *HistogramSnapshot + meter *MeterSnapshot +} + +// Count returns the number of events recorded at the time the snapshot was +// taken. +func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } + +// Max returns the maximum value at the time the snapshot was taken. +func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } + +// Mean returns the mean value at the time the snapshot was taken. +func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } + +// Min returns the minimum value at the time the snapshot was taken. +func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } + +// Percentile returns an arbitrary percentile of sampled values at the time the +// snapshot was taken. +func (t *TimerSnapshot) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of sampled values at +// the time the snapshot was taken. +func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. +func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } + +// Rate15 returns the fifteen-minute moving average rate of events per second +// at the time the snapshot was taken. +func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } + +// RateMean returns the meter's mean rate of events per second at the time the +// snapshot was taken. +func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } + +// Snapshot returns the snapshot. +func (t *TimerSnapshot) Snapshot() Timer { return t } + +// StdDev returns the standard deviation of the values at the time the snapshot +// was taken. +func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } + +// Stop is a no-op. 
+func (t *TimerSnapshot) Stop() {} + +// Sum returns the sum at the time the snapshot was taken. +func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() } + +// Time panics. +func (*TimerSnapshot) Time(func()) { + panic("Time called on a TimerSnapshot") +} + +// Update panics. +func (*TimerSnapshot) Update(time.Duration) { + panic("Update called on a TimerSnapshot") +} + +// UpdateSince panics. +func (*TimerSnapshot) UpdateSince(time.Time) { + panic("UpdateSince called on a TimerSnapshot") +} + +// Variance returns the variance of the values at the time the snapshot was +// taken. +func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh new file mode 100755 index 0000000..c4ae91e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/validate.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -e + +# check there are no formatting issues +GOFMT_LINES=`gofmt -l . | wc -l | xargs` +test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues" + +# run the tests for the root package +go test -race . diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go new file mode 100644 index 0000000..091e971 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/writer.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "fmt" + "io" + "sort" + "time" +) + +// Write sorts writes each metric in the given registry periodically to the +// given io.Writer. +func Write(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteOnce(r, w) + } +} + +// WriteOnce sorts and writes metrics in the given registry to the given +// io.Writer. 
+func WriteOnce(r Registry, w io.Writer) { + var namedMetrics namedMetricSlice + r.Each(func(name string, i interface{}) { + namedMetrics = append(namedMetrics, namedMetric{name, i}) + }) + + sort.Sort(namedMetrics) + for _, namedMetric := range namedMetrics { + switch metric := namedMetric.m.(type) { + case Counter: + fmt.Fprintf(w, "counter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", metric.Count()) + case Gauge: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %9d\n", metric.Value()) + case GaugeFloat64: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) + fmt.Fprintf(w, " error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "histogram %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", h.Count()) + fmt.Fprintf(w, " min: %9d\n", h.Min()) + fmt.Fprintf(w, " max: %9d\n", h.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "meter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", m.Count()) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "timer %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", t.Count()) + fmt.Fprintf(w, " min: %9d\n", t.Min()) + fmt.Fprintf(w, " max: %9d\n", t.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) + } + } +} + +type namedMetric struct { + name string + m interface{} +} + +// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. +type namedMetricSlice []namedMetric + +func (nms namedMetricSlice) Len() int { return len(nms) } + +func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } + +func (nms namedMetricSlice) Less(i, j int) bool { + return nms[i].name < nms[j].name +}
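Finally, the writer above produces a human-readable dump, which is handy when debugging the nozzle locally: WriteOnce emits a single report sorted by metric name, while Write blocks on a ticker and reports continuously. A sketch; the metric name and intervals are illustrative:

```
package main

import (
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	t := metrics.GetOrRegisterTimer("example.op", nil) // placeholder name
	defer t.Stop()
	t.Update(3 * time.Millisecond)

	// One-off, sorted-by-name dump of every metric in the default registry.
	metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout)

	// Or report continuously in the background; Write blocks, so run it in
	// its own goroutine.
	go metrics.Write(metrics.DefaultRegistry, 30*time.Second, os.Stderr)
	time.Sleep(100 * time.Millisecond)
}
```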