diff --git a/Gopkg.lock b/Gopkg.lock index 168fa323..b1ddc961 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -10,13 +10,12 @@ version = "v1.3.4" [[projects]] - branch = "cm-v1-create-acl-request" - digest = "1:7ba8eb8f8a834b59e9c771aa2a0228a78bfde2736aefb1aa0aa25854692f8520" + digest = "1:dcffbfae4b048c3e927ada13b0bfece5bb1958553ef7b1c595bd1a6dc17b0220" name = "github.com/Shopify/sarama" packages = ["."] pruneopts = "NUT" - revision = "e8436b87bcb0daea746b79c779fe2b19eb5e282e" - source = "github.com/Mongey/sarama" + revision = "5d2af84cf5e2dd36f2daecaaafa13c4e286f20fd" + version = "v1.22.0" [[projects]] digest = "1:8bd40ec66a32437126e8ff3080f26b11ca5926917daa01eba1285d0b059416bc" @@ -539,7 +538,7 @@ [[projects]] branch = "master" - digest = "1:7bd4c9c1f1410e2a2a31ef28044c96a4d95d89aa46e4cc7091af25d0b1efbc44" + digest = "1:3199fc7bc956fbbfbbceca12347591619b369cf4e3b8b41894668e1faf1fdbd7" name = "golang.org/x/net" packages = [ "context", @@ -549,7 +548,9 @@ "http2", "http2/hpack", "idna", + "internal/socks", "internal/timeseries", + "proxy", "trace", ] pruneopts = "NUT" diff --git a/Gopkg.toml b/Gopkg.toml index 9cbd3ded..26e3d980 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -3,8 +3,7 @@ [[constraint]] name = "github.com/Shopify/sarama" - source = "github.com/Mongey/sarama" - branch = "cm-v1-create-acl-request" + version = "v1.22.0" [[constraint]] name = "github.com/hashicorp/terraform" diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go index 42310809..22b9796a 100644 --- a/vendor/github.com/Shopify/sarama/acl_bindings.go +++ b/vendor/github.com/Shopify/sarama/acl_bindings.go @@ -1,12 +1,13 @@ package sarama +//Resource holds information about acl resource type type Resource struct { ResourceType AclResourceType ResourceName string ResoucePatternType AclResourcePatternType } -func (r *Resource) encode(pe packetEncoder, version int) error { +func (r *Resource) encode(pe packetEncoder, version int16) error { pe.putInt8(int8(r.ResourceType)) if err := pe.putString(r.ResourceName); err != nil { @@ -45,6 +46,7 @@ func (r *Resource) decode(pd packetDecoder, version int16) (err error) { return nil } +//Acl holds information about acl type type Acl struct { Principal string Host string @@ -91,12 +93,13 @@ func (a *Acl) decode(pd packetDecoder, version int16) (err error) { return nil } +//ResourceAcls is an acl resource type type ResourceAcls struct { Resource Acls []*Acl } -func (r *ResourceAcls) encode(pe packetEncoder, version int) error { +func (r *ResourceAcls) encode(pe packetEncoder, version int16) error { if err := r.Resource.encode(pe, version); err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go index 1ec91358..da1cdefc 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_request.go +++ b/vendor/github.com/Shopify/sarama/acl_create_request.go @@ -1,7 +1,8 @@ package sarama +//CreateAclsRequest is an acl creation request type CreateAclsRequest struct { - Version int + Version int16 AclCreations []*AclCreation } @@ -20,7 +21,7 @@ func (c *CreateAclsRequest) encode(pe packetEncoder) error { } func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) { - c.Version = int(version) + c.Version = version n, err := pd.getArrayLength() if err != nil { return err @@ -38,16 +39,16 @@ func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) return nil } -func (d *CreateAclsRequest) key() int16 { +func 
(c *CreateAclsRequest) key() int16 { return 30 } -func (d *CreateAclsRequest) version() int16 { - return int16(d.Version) +func (c *CreateAclsRequest) version() int16 { + return c.Version } -func (d *CreateAclsRequest) requiredVersion() KafkaVersion { - switch d.Version { +func (c *CreateAclsRequest) requiredVersion() KafkaVersion { + switch c.Version { case 1: return V2_0_0_0 default: @@ -55,12 +56,13 @@ func (d *CreateAclsRequest) requiredVersion() KafkaVersion { } } +//AclCreation is a wrapper around Resource and Acl type type AclCreation struct { Resource Acl } -func (a *AclCreation) encode(pe packetEncoder, version int) error { +func (a *AclCreation) encode(pe packetEncoder, version int16) error { if err := a.Resource.encode(pe, version); err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go index 8a56f357..f5a5e9a6 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_response.go +++ b/vendor/github.com/Shopify/sarama/acl_create_response.go @@ -2,6 +2,7 @@ package sarama import "time" +//CreateAclsResponse is an acl creation response type type CreateAclsResponse struct { ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse @@ -46,18 +47,19 @@ func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) return nil } -func (d *CreateAclsResponse) key() int16 { +func (c *CreateAclsResponse) key() int16 { return 30 } -func (d *CreateAclsResponse) version() int16 { +func (c *CreateAclsResponse) version() int16 { return 0 } -func (d *CreateAclsResponse) requiredVersion() KafkaVersion { +func (c *CreateAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } +//AclCreationResponse is an acl creation response type type AclCreationResponse struct { Err KError ErrMsg *string diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go index 5e94ad73..15908eac 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_request.go +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -1,5 +1,6 @@ package sarama +//DeleteAclsRequest is a delete acl request type DeleteAclsRequest struct { Version int Filters []*AclFilter diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go index 45ad14e0..65295652 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_response.go +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -2,21 +2,22 @@ package sarama import "time" +//DeleteAclsResponse is a delete acl response type DeleteAclsResponse struct { - Version int + Version int16 ThrottleTime time.Duration FilterResponses []*FilterResponse } -func (a *DeleteAclsResponse) encode(pe packetEncoder) error { - pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) +func (d *DeleteAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) - if err := pe.putArrayLength(len(a.FilterResponses)); err != nil { + if err := pe.putArrayLength(len(d.FilterResponses)); err != nil { return err } - for _, filterResponse := range a.FilterResponses { - if err := filterResponse.encode(pe, a.Version); err != nil { + for _, filterResponse := range d.FilterResponses { + if err := filterResponse.encode(pe, d.Version); err != nil { return err } } @@ -24,22 +25,22 @@ func (a *DeleteAclsResponse) encode(pe packetEncoder) error { return nil } -func (a *DeleteAclsResponse) decode(pd
packetDecoder, version int16) (err error) { +func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { throttleTime, err := pd.getInt32() if err != nil { return err } - a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } - a.FilterResponses = make([]*FilterResponse, n) + d.FilterResponses = make([]*FilterResponse, n) for i := 0; i < n; i++ { - a.FilterResponses[i] = new(FilterResponse) - if err := a.FilterResponses[i].decode(pd, version); err != nil { + d.FilterResponses[i] = new(FilterResponse) + if err := d.FilterResponses[i].decode(pd, version); err != nil { return err } } @@ -59,13 +60,14 @@ func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } +//FilterResponse is a filter response type type FilterResponse struct { Err KError ErrMsg *string MatchingAcls []*MatchingAcl } -func (f *FilterResponse) encode(pe packetEncoder, version int) error { +func (f *FilterResponse) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(f.Err)) if err := pe.putNullableString(f.ErrMsg); err != nil { return err } @@ -109,6 +111,7 @@ func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { return nil } +//MatchingAcl is a matching acl type type MatchingAcl struct { Err KError ErrMsg *string @@ -116,7 +119,7 @@ type MatchingAcl struct { Acl } -func (m *MatchingAcl) encode(pe packetEncoder, version int) error { +func (m *MatchingAcl) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(m.Err)) if err := pe.putNullableString(m.ErrMsg); err != nil { return err diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go index 3c95320e..5222d46e 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go @@ -1,5 +1,6 @@ package sarama +//DescribeAclsRequest is a describe acl request type type DescribeAclsRequest struct { Version int AclFilter diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go index 5bda1f58..12126e54 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_response.go +++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -2,8 +2,9 @@ package sarama import "time" +//DescribeAclsResponse is a describe acl response type type DescribeAclsResponse struct { - Version int + Version int16 ThrottleTime time.Duration Err KError ErrMsg *string diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go index 9cc1b660..c10ad7b9 100644 --- a/vendor/github.com/Shopify/sarama/acl_types.go +++ b/vendor/github.com/Shopify/sarama/acl_types.go @@ -1,54 +1,55 @@ package sarama -type AclOperation int +type ( + AclOperation int + + AclPermissionType int + + AclResourceType int + + AclResourcePatternType int +) // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java const ( - AclOperationUnknown AclOperation = 0 - AclOperationAny AclOperation = 1 - AclOperationAll AclOperation = 2 - AclOperationRead AclOperation = 3 - AclOperationWrite AclOperation = 4 - AclOperationCreate AclOperation = 5 - AclOperationDelete AclOperation = 6 - AclOperationAlter AclOperation = 7 - AclOperationDescribe AclOperation = 8 - AclOperationClusterAction AclOperation = 9 -
AclOperationDescribeConfigs AclOperation = 10 - AclOperationAlterConfigs AclOperation = 11 - AclOperationIdempotentWrite AclOperation = 12 + AclOperationUnknown AclOperation = iota + AclOperationAny + AclOperationAll + AclOperationRead + AclOperationWrite + AclOperationCreate + AclOperationDelete + AclOperationAlter + AclOperationDescribe + AclOperationClusterAction + AclOperationDescribeConfigs + AclOperationAlterConfigs + AclOperationIdempotentWrite ) -type AclPermissionType int - // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java const ( - AclPermissionUnknown AclPermissionType = 0 - AclPermissionAny AclPermissionType = 1 - AclPermissionDeny AclPermissionType = 2 - AclPermissionAllow AclPermissionType = 3 + AclPermissionUnknown AclPermissionType = iota + AclPermissionAny + AclPermissionDeny + AclPermissionAllow ) -type AclResourceType int - // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java const ( - AclResourceUnknown AclResourceType = 0 - AclResourceAny AclResourceType = 1 - AclResourceTopic AclResourceType = 2 - AclResourceGroup AclResourceType = 3 - AclResourceCluster AclResourceType = 4 - AclResourceTransactionalID AclResourceType = 5 + AclResourceUnknown AclResourceType = iota + AclResourceAny + AclResourceTopic + AclResourceGroup + AclResourceCluster + AclResourceTransactionalID ) -type AclResourcePatternType int - // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java - const ( - AclPatternUnknown AclResourcePatternType = 0 - AclPatternAny AclResourcePatternType = 1 - AclPatternMatch AclResourcePatternType = 2 - AclPatternLiteral AclResourcePatternType = 3 - AclPatternPrefixed AclResourcePatternType = 4 + AclPatternUnknown AclResourcePatternType = iota + AclPatternAny + AclPatternMatch + AclPatternLiteral + AclPatternPrefixed ) diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go index 6da166c6..fc227ab8 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go @@ -1,5 +1,6 @@ package sarama +//AddOffsetsToTxnRequest adds offsets to a transaction request type AddOffsetsToTxnRequest struct { TransactionalID string ProducerID int64 diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go index 3a46151a..c88c1f89 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go @@ -4,6 +4,7 @@ import ( "time" ) +//AddOffsetsToTxnResponse is a response type for adding offsets to txns type AddOffsetsToTxnResponse struct { ThrottleTime time.Duration Err KError diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go index a8a59225..8d4b42e3 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go @@ -1,5 +1,6 @@ package sarama +//AddPartitionsToTxnRequest is an add partition request type AddPartitionsToTxnRequest struct { TransactionalID string ProducerID int64 diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go index 581c556c..eb4f23ec 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go @@ -4,6 +4,7 @@ import ( "time" ) +//AddPartitionsToTxnResponse is a response type for adding partitions to a transaction type AddPartitionsToTxnResponse struct { ThrottleTime time.Duration Errors map[string][]*PartitionError @@ -82,6 +83,7 @@ func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } +//PartitionError is a partition error type type PartitionError struct { Partition int32 Err KError diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go index 52725758..d88a0a49 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -1,6 +1,10 @@ package sarama -import "errors" +import ( + "errors" + "math/rand" + "sync" +) // ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics, // brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0. @@ -13,6 +17,12 @@ type ClusterAdmin interface { // may not return information about the new topic.The validateOnly option is supported from version 0.10.2.0. CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error + // List the topics available in the cluster with the default options. + ListTopics() (map[string]TopicDetail, error) + + // Describe some topics in the cluster. + DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) + // Delete a topic. It may take several seconds after the DeleteTopic to returns success // and for all the brokers to become aware that the topics are gone. // During this time, listTopics may continue to return information about the deleted topic. @@ -65,6 +75,18 @@ type ClusterAdmin interface { // This operation is supported by brokers with version 0.11.0.0 or higher. DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) + // List the consumer groups available in the cluster. + ListConsumerGroups() (map[string]string, error) + + // Describe the given consumer groups. + DescribeConsumerGroups(groups []string) ([]*GroupDescription, error) + + // List the consumer group offsets available in the cluster. + ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) + + // Get information about the nodes in the cluster. + DescribeCluster() (brokers []*Broker, controllerID int32, err error) + // Close shuts down the admin and closes underlying client.
Close() error } @@ -109,7 +131,7 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } if detail == nil { - return errors.New("You must specify topic details") + return errors.New("you must specify topic details") } topicDetails := make(map[string]*TopicDetail) @@ -144,12 +166,135 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } if topicErr.Err != ErrNoError { - return topicErr.Err + return topicErr } return nil } +func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, err + } + + request := &MetadataRequest{ + Topics: topics, + AllowAutoTopicCreation: false, + } + + if ca.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 5 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, err + } + return response.Topics, nil +} + +func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, int32(0), err + } + + request := &MetadataRequest{ + Topics: []string{}, + } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, int32(0), err + } + + return response.Brokers, response.ControllerID, nil +} + +func (ca *clusterAdmin) findAnyBroker() (*Broker, error) { + brokers := ca.client.Brokers() + if len(brokers) > 0 { + index := rand.Intn(len(brokers)) + return brokers[index], nil + } + return nil, errors.New("no available broker") +} + +func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { + // In order to build TopicDetails we need to first get the list of all + // topics using a MetadataRequest and then get their configs using a + // DescribeConfigsRequest request. To avoid sending many requests to the + // broker, we use a single DescribeConfigsRequest. 
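The ListTopics flow above is consumed through sarama's existing NewClusterAdmin constructor. A minimal caller sketch; the broker address and config values here are assumptions for illustration, not part of this diff:

	package main

	import (
		"log"

		"github.com/Shopify/sarama"
	)

	func main() {
		conf := sarama.NewConfig()
		conf.Version = sarama.V1_0_0_0 // lets DescribeTopics pick metadata request version 5

		admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
		if err != nil {
			log.Fatal(err)
		}
		defer admin.Close()

		// Internally this is one all-topic MetadataRequest plus a single DescribeConfigsRequest.
		topics, err := admin.ListTopics()
		if err != nil {
			log.Fatal(err)
		}
		for name, detail := range topics {
			log.Printf("topic %s: %d partition(s)", name, detail.NumPartitions)
		}
	}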
+ + // Send the all-topic MetadataRequest + b, err := ca.findAnyBroker() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + metadataReq := &MetadataRequest{} + metadataResp, err := b.GetMetadata(metadataReq) + if err != nil { + return nil, err + } + + topicsDetailsMap := make(map[string]TopicDetail) + + var describeConfigsResources []*ConfigResource + + for _, topic := range metadataResp.Topics { + topicDetails := TopicDetail{ + NumPartitions: int32(len(topic.Partitions)), + } + if len(topic.Partitions) > 0 { + topicDetails.ReplicaAssignment = map[int32][]int32{} + for _, partition := range topic.Partitions { + topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas + } + topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas)) + } + topicsDetailsMap[topic.Name] = topicDetails + + // we populate the resources we want to describe from the MetadataResponse + topicResource := ConfigResource{ + Type: TopicResource, + Name: topic.Name, + } + describeConfigsResources = append(describeConfigsResources, &topicResource) + } + + // Send the DescribeConfigsRequest + describeConfigsReq := &DescribeConfigsRequest{ + Resources: describeConfigsResources, + } + describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq) + if err != nil { + return nil, err + } + + for _, resource := range describeConfigsResp.Resources { + topicDetails := topicsDetailsMap[resource.Name] + topicDetails.ConfigEntries = make(map[string]*string) + + for _, entry := range resource.Configs { + // only include non-default non-sensitive config + // (don't actually think topic config will ever be sensitive) + if entry.Default || entry.Sensitive { + continue + } + topicDetails.ConfigEntries[entry.Name] = &entry.Value + } + + topicsDetailsMap[resource.Name] = topicDetails + } + + return topicsDetailsMap, nil +} + func (ca *clusterAdmin) DeleteTopic(topic string) error { if topic == "" { @@ -215,7 +360,7 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ } if topicErr.Err != ErrNoError { - return topicErr.Err + return topicErr } return nil @@ -380,3 +525,92 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi } return mAcls, nil } + +func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) { + groupsPerBroker := make(map[*Broker][]string) + + for _, group := range groups { + controller, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + groupsPerBroker[controller] = append(groupsPerBroker[controller], group) + + } + + for broker, brokerGroups := range groupsPerBroker { + response, err := broker.DescribeGroups(&DescribeGroupsRequest{ + Groups: brokerGroups, + }) + if err != nil { + return nil, err + } + + result = append(result, response.Groups...) 
+ } + return result, nil +} + +func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) { + allGroups = make(map[string]string) + + // Query brokers in parallel, since we have to query *all* brokers + brokers := ca.client.Brokers() + groupMaps := make(chan map[string]string, len(brokers)) + errors := make(chan error, len(brokers)) + wg := sync.WaitGroup{} + + for _, b := range brokers { + wg.Add(1) + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.ListGroups(&ListGroupsRequest{}) + if err != nil { + errors <- err + return + } + + groups := make(map[string]string) + for group, typ := range response.Groups { + groups[group] = typ + } + + groupMaps <- groups + + }(b, ca.conf) + } + + wg.Wait() + close(groupMaps) + close(errors) + + for groupMap := range groupMaps { + for group, protocolType := range groupMap { + allGroups[group] = protocolType + } + } + + // Intentionally return only the first error for simplicity + err = <-errors + return +} + +func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + + request := &OffsetFetchRequest{ + ConsumerGroup: group, + partitions: topicPartitions, + } + + if ca.conf.Version.IsAtLeast(V0_8_2_2) { + request.Version = 1 + } + + return coordinator.FetchOffset(request) +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go index 48c44ead..26c275b8 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_request.go +++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go @@ -1,45 +1,47 @@ package sarama +//AlterConfigsRequest is an alter config request type type AlterConfigsRequest struct { Resources []*AlterConfigsResource ValidateOnly bool } +//AlterConfigsResource is an alter config resource type type AlterConfigsResource struct { Type ConfigResourceType Name string ConfigEntries map[string]*string } -func (acr *AlterConfigsRequest) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(acr.Resources)); err != nil { +func (a *AlterConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(a.Resources)); err != nil { return err } - for _, r := range acr.Resources { + for _, r := range a.Resources { if err := r.encode(pe); err != nil { return err } } - pe.putBool(acr.ValidateOnly) + pe.putBool(a.ValidateOnly) return nil } -func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { +func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { resourceCount, err := pd.getArrayLength() if err != nil { return err } - acr.Resources = make([]*AlterConfigsResource, resourceCount) - for i := range acr.Resources { + a.Resources = make([]*AlterConfigsResource, resourceCount) + for i := range a.Resources { r := &AlterConfigsResource{} err = r.decode(pd, version) if err != nil { return err } - acr.Resources[i] = r + a.Resources[i] = r } validateOnly, err := pd.getBool() @@ -47,22 +49,22 @@ func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { return err } - acr.ValidateOnly = validateOnly + a.ValidateOnly = validateOnly return nil } -func (ac *AlterConfigsResource) encode(pe packetEncoder) error { - pe.putInt8(int8(ac.Type)) +func (a *AlterConfigsResource) encode(pe packetEncoder) error { + 
pe.putInt8(int8(a.Type)) - if err := pe.putString(ac.Name); err != nil { + if err := pe.putString(a.Name); err != nil { return err } - if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil { + if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil { return err } - for configKey, configValue := range ac.ConfigEntries { + for configKey, configValue := range a.ConfigEntries { if err := pe.putString(configKey); err != nil { return err } @@ -74,18 +76,18 @@ func (ac *AlterConfigsResource) encode(pe packetEncoder) error { return nil } -func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { +func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error { t, err := pd.getInt8() if err != nil { return err } - ac.Type = ConfigResourceType(t) + a.Type = ConfigResourceType(t) name, err := pd.getString() if err != nil { return err } - ac.Name = name + a.Name = name n, err := pd.getArrayLength() if err != nil { @@ -93,13 +95,13 @@ func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { } if n > 0 { - ac.ConfigEntries = make(map[string]*string, n) + a.ConfigEntries = make(map[string]*string, n) for i := 0; i < n; i++ { configKey, err := pd.getString() if err != nil { return err } - if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { return err } } @@ -107,14 +109,14 @@ func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { return err } -func (acr *AlterConfigsRequest) key() int16 { +func (a *AlterConfigsRequest) key() int16 { return 33 } -func (acr *AlterConfigsRequest) version() int16 { +func (a *AlterConfigsRequest) version() int16 { return 0 } -func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion { +func (a *AlterConfigsRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go index 29b09e1f..3893663c 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_response.go +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -2,11 +2,13 @@ package sarama import "time" +//AlterConfigsResponse is a response type for alter config type AlterConfigsResponse struct { ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } +//AlterConfigsResourceResponse is a response type for alter config resource type AlterConfigsResourceResponse struct { ErrorCode int16 ErrorMsg string @@ -14,21 +16,21 @@ type AlterConfigsResourceResponse struct { Name string } -func (ct *AlterConfigsResponse) encode(pe packetEncoder) error { - pe.putInt32(int32(ct.ThrottleTime / time.Millisecond)) +func (a *AlterConfigsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) - if err := pe.putArrayLength(len(ct.Resources)); err != nil { + if err := pe.putArrayLength(len(a.Resources)); err != nil { return err } - for i := range ct.Resources { - pe.putInt16(ct.Resources[i].ErrorCode) - err := pe.putString(ct.Resources[i].ErrorMsg) + for i := range a.Resources { + pe.putInt16(a.Resources[i].ErrorCode) + err := pe.putString(a.Resources[i].ErrorMsg) if err != nil { return nil } - pe.putInt8(int8(ct.Resources[i].Type)) - err = pe.putString(ct.Resources[i].Name) + pe.putInt8(int8(a.Resources[i].Type)) + err = pe.putString(a.Resources[i].Name) if err != nil { return nil } @@ -37,59 +39,59 @@ func (ct *AlterConfigsResponse) encode(pe
packetEncoder) error { return nil } -func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { +func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { throttleTime, err := pd.getInt32() if err != nil { return err } - acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond responseCount, err := pd.getArrayLength() if err != nil { return err } - acr.Resources = make([]*AlterConfigsResourceResponse, responseCount) + a.Resources = make([]*AlterConfigsResourceResponse, responseCount) - for i := range acr.Resources { - acr.Resources[i] = new(AlterConfigsResourceResponse) + for i := range a.Resources { + a.Resources[i] = new(AlterConfigsResourceResponse) errCode, err := pd.getInt16() if err != nil { return err } - acr.Resources[i].ErrorCode = errCode + a.Resources[i].ErrorCode = errCode e, err := pd.getString() if err != nil { return err } - acr.Resources[i].ErrorMsg = e + a.Resources[i].ErrorMsg = e t, err := pd.getInt8() if err != nil { return err } - acr.Resources[i].Type = ConfigResourceType(t) + a.Resources[i].Type = ConfigResourceType(t) name, err := pd.getString() if err != nil { return err } - acr.Resources[i].Name = name + a.Resources[i].Name = name } return nil } -func (r *AlterConfigsResponse) key() int16 { +func (a *AlterConfigsResponse) key() int16 { return 32 } -func (r *AlterConfigsResponse) version() int16 { +func (a *AlterConfigsResponse) version() int16 { return 0 } -func (r *AlterConfigsResponse) requiredVersion() KafkaVersion { +func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go index ab65f01c..b33167c0 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -1,24 +1,25 @@ package sarama +//ApiVersionsRequest ... 
type ApiVersionsRequest struct { } -func (r *ApiVersionsRequest) encode(pe packetEncoder) error { +func (a *ApiVersionsRequest) encode(pe packetEncoder) error { return nil } -func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { +func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { return nil } -func (r *ApiVersionsRequest) key() int16 { +func (a *ApiVersionsRequest) key() int16 { return 18 } -func (r *ApiVersionsRequest) version() int16 { +func (a *ApiVersionsRequest) version() int16 { return 0 } -func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { +func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go index 23bc326e..bb1f0b31 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -1,5 +1,6 @@ package sarama +//ApiVersionsResponseBlock is an api version response block type type ApiVersionsResponseBlock struct { ApiKey int16 MinVersion int16 @@ -31,6 +32,7 @@ func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { return nil } +//ApiVersionsResponse is an api version response type type ApiVersionsResponse struct { Err KError ApiVersions []*ApiVersionsResponseBlock diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go index 89a0c702..5db0a73d 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -92,9 +92,8 @@ func newTransactionManager(conf *Config, client Client) (*transactionManager, er } type asyncProducer struct { - client Client - conf *Config - ownClient bool + client Client + conf *Config errors chan *ProducerError input, successes, retries chan *ProducerMessage @@ -113,18 +112,19 @@ func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { if err != nil { return nil, err } - - p, err := NewAsyncProducerFromClient(client) - if err != nil { - return nil, err - } - p.(*asyncProducer).ownClient = true - return p, nil + return newAsyncProducer(client) } // NewAsyncProducerFromClient creates a new Producer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this producer. func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { + // For clients passed in by the caller, ensure we don't + // call Close() on it.
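The nopCloserClient wrapper matters when one Client is shared between producers and consumers; a sketch of that caller-owned-client pattern (the address, config, and error handling are assumptions, not part of this diff):

	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // the caller keeps ownership of the client

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		log.Fatal(err)
	}
	// Close tears down the producer; because of nopCloserClient it no
	// longer closes the shared client underneath.
	defer producer.Close()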
+ cli := &nopCloserClient{client} + return newAsyncProducer(cli) +} + +func newAsyncProducer(client Client) (AsyncProducer, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient @@ -483,6 +483,19 @@ func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan return input } +func (pp *partitionProducer) backoff(retries int) { + var backoff time.Duration + if pp.parent.conf.Producer.Retry.BackoffFunc != nil { + maxRetries := pp.parent.conf.Producer.Retry.Max + backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries) + } else { + backoff = pp.parent.conf.Producer.Retry.Backoff + } + if backoff > 0 { + time.Sleep(backoff) + } +} + func (pp *partitionProducer) dispatch() { // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` // on the first message @@ -493,11 +506,31 @@ func (pp *partitionProducer) dispatch() { pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} } + defer func() { + if pp.brokerProducer != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + } + }() + for msg := range pp.input { + + if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil { + select { + case <-pp.brokerProducer.abandoned: + // a message on the abandoned channel means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + pp.brokerProducer = nil + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + default: + // producer connection is still open. + } + } + if msg.retries > pp.highWatermark { // a new, higher, retry level; handle it and then back off pp.newHighWatermark(msg.retries) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + pp.backoff(msg.retries) } else if pp.highWatermark > 0 { // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level if msg.retries < pp.highWatermark { @@ -525,7 +558,7 @@ func (pp *partitionProducer) dispatch() { if pp.brokerProducer == nil { if err := pp.updateLeader(); err != nil { pp.parent.returnError(msg, err) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + pp.backoff(msg.retries) continue } Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) @@ -533,10 +566,6 @@ func (pp *partitionProducer) dispatch() { pp.brokerProducer.input <- msg } - - if pp.brokerProducer != nil { - pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) - } } func (pp *partitionProducer) newHighWatermark(hwm int) { @@ -637,6 +666,10 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { close(responses) }) + if p.conf.Producer.Retry.Max <= 0 { + bp.abandoned = make(chan struct{}) + } + return bp } @@ -655,6 +688,7 @@ type brokerProducer struct { input chan *ProducerMessage output chan<- *produceSet responses <-chan *brokerProducerResponse + abandoned chan struct{} buffer *produceSet timer <-chan time.Time @@ -829,9 +863,17 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: - retryTopics = append(retryTopics, topic) + if 
bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + bp.parent.returnErrors(pSet.msgs, block.Err) + } else { + retryTopics = append(retryTopics, topic) + } // Other non-retriable errors default: + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + } bp.parent.returnErrors(pSet.msgs, block.Err) } }) @@ -957,11 +999,9 @@ func (p *asyncProducer) shutdown() { p.inFlight.Wait() - if p.ownClient { - err := p.client.Close() - if err != nil { - Logger.Println("producer/shutdown failed to close the embedded client:", err) - } + err := p.client.Close() + if err != nil { + Logger.Println("producer/shutdown failed to close the embedded client:", err) } close(p.input) @@ -1048,5 +1088,10 @@ func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { p.brokerLock.Lock() defer p.brokerLock.Unlock() + bc, ok := p.brokers[broker] + if ok && bc.abandoned != nil { + close(bc.abandoned) + } + delete(p.brokers, broker) } diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go index e78988d7..2fce17fb 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/Shopify/sarama/balance_strategy.go @@ -24,7 +24,7 @@ func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) { // -------------------------------------------------------------------- // BalanceStrategy is used to balance topics and partitions -// across memebers of a consumer group +// across members of a consumer group type BalanceStrategy interface { // Name uniquely identifies the strategy. Name() string @@ -78,7 +78,7 @@ type balanceStrategy struct { // Name implements BalanceStrategy. func (s *balanceStrategy) Name() string { return s.name } -// Balance implements BalanceStrategy. +// Plan implements BalanceStrategy. func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { // Build members by topic map mbt := make(map[string][]string) diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index 6a33b802..53c7f8e6 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -6,7 +6,9 @@ import ( "fmt" "io" "net" + "sort" "strconv" + "strings" "sync" "sync/atomic" "time" @@ -16,19 +18,20 @@ import ( // Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. type Broker struct { - id int32 - addr string + conf *Config rack *string - conf *Config + id int32 + addr string correlationID int32 conn net.Conn connErr error lock sync.Mutex opened int32 + responses chan responsePromise + done chan bool - responses chan responsePromise - done chan bool + registeredMetrics []string incomingByteRate metrics.Meter requestRate metrics.Meter @@ -46,6 +49,68 @@ type Broker struct { brokerResponseSize metrics.Histogram } +// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker +type SASLMechanism string + +const ( + // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+) + SASLTypeOAuth = "OAUTHBEARER" + // SASLTypePlaintext represents the SASL/PLAIN mechanism + SASLTypePlaintext = "PLAIN" + // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism. + SASLTypeSCRAMSHA256 = "SCRAM-SHA-256" + // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism. 
+ SASLTypeSCRAMSHA512 = "SCRAM-SHA-512" + // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and + // server negotiate SASL auth using opaque packets. + SASLHandshakeV0 = int16(0) + // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and + // server negotiate SASL by wrapping tokens with Kafka protocol headers. + SASLHandshakeV1 = int16(1) + // SASLExtKeyAuth is the reserved extension key name sent as part of the + // SASL/OAUTHBEARER intial client response + SASLExtKeyAuth = "auth" +) + +// AccessToken contains an access token used to authenticate a +// SASL/OAUTHBEARER client along with associated metadata. +type AccessToken struct { + // Token is the access token payload. + Token string + // Extensions is a optional map of arbitrary key-value pairs that can be + // sent with the SASL/OAUTHBEARER initial client response. These values are + // ignored by the SASL server if they are unexpected. This feature is only + // supported by Kafka >= 2.1.0. + Extensions map[string]string +} + +// AccessTokenProvider is the interface that encapsulates how implementors +// can generate access tokens for Kafka broker authentication. +type AccessTokenProvider interface { + // Token returns an access token. The implementation should ensure token + // reuse so that multiple calls at connect time do not create multiple + // tokens. The implementation should also periodically refresh the token in + // order to guarantee that each call returns an unexpired token. This + // method should not block indefinitely--a timeout error should be returned + // after a short period of inactivity so that the broker connection logic + // can log debugging information and retry. + Token() (*AccessToken, error) +} + +// SCRAMClient is a an interface to a SCRAM +// client implementation. +type SCRAMClient interface { + // Begin prepares the client for the SCRAM exchange + // with the server with a user name and a password + Begin(userName, password, authzID string) error + // Step steps client through the SCRAM exchange. It is + // called repeatedly until it errors or `Done` returns true. + Step(challenge string) (response string, err error) + // Done should return true when the SCRAM conversation + // is over. 
+ Done() bool +} + type responsePromise struct { requestTime time.Time correlationID int32 @@ -91,6 +156,8 @@ func (b *Broker) Open(conf *Config) error { if conf.Net.TLS.Enable { b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) + } else if conf.Net.Proxy.Enable { + b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr) } else { b.conn, b.connErr = dialer.Dial("tcp", b.addr) } @@ -115,17 +182,13 @@ func (b *Broker) Open(conf *Config) error { // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and are already exposed through the global metrics above if b.id >= 0 { - b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry) - b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry) - b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry) - b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry) - b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry) - b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry) - b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry) + b.registerMetrics() } if conf.Net.SASL.Enable { - b.connErr = b.sendAndReceiveSASLPlainAuth() + + b.connErr = b.authenticateViaSASL() + if b.connErr != nil { err = b.conn.Close() if err == nil { @@ -162,6 +225,7 @@ func (b *Broker) Connected() (bool, error) { return b.conn != nil, b.connErr } +//Close closes the broker resources func (b *Broker) Close() error { b.lock.Lock() defer b.lock.Unlock() @@ -180,12 +244,7 @@ func (b *Broker) Close() error { b.done = nil b.responses = nil - if b.id >= 0 { - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b)) - } + b.unregisterMetrics() if err == nil { Logger.Printf("Closed connection to broker %s\n", b.addr) @@ -219,6 +278,7 @@ func (b *Broker) Rack() string { return *b.rack } +//GetMetadata sends a metadata request and returns a metadata response or error func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { response := new(MetadataResponse) @@ -231,6 +291,7 @@ func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error return response, nil } +//GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { response := new(ConsumerMetadataResponse) @@ -243,6 +304,7 @@ func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*Consume return response, nil } +//FindCoordinator sends a find coordinator request and returns a response or error func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) { response := new(FindCoordinatorResponse) @@ -255,6 +317,7 @@ func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordina return response, nil } +//GetAvailableOffsets returns an offset response or error func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { response :=
new(OffsetResponse) @@ -267,9 +330,12 @@ func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, e return response, nil } +//Produce returns a produce response or error func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { - var response *ProduceResponse - var err error + var ( + response *ProduceResponse + err error + ) if request.RequiredAcks == NoResponse { err = b.sendAndReceive(request, nil) @@ -285,11 +351,11 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { return response, nil } +//Fetch returns a FetchResponse or error func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { response := new(FetchResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -297,11 +363,11 @@ func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { return response, nil } +//CommitOffset returns an offset commit response or error func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { response := new(OffsetCommitResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -309,11 +375,11 @@ func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitRespon return response, nil } +//FetchOffset returns an offset fetch response or error func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { response := new(OffsetFetchResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -321,6 +387,7 @@ func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, return response, nil } +//JoinGroup returns a join group response or error func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { response := new(JoinGroupResponse) @@ -332,6 +399,7 @@ func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error return response, nil } +//SyncGroup returns a sync group response or error func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { response := new(SyncGroupResponse) @@ -343,6 +411,7 @@ func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error return response, nil } +//LeaveGroup returns a leave group response or error func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { response := new(LeaveGroupResponse) @@ -354,6 +423,7 @@ func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, er return response, nil } +//Heartbeat returns a heartbeat response or error func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { response := new(HeartbeatResponse) @@ -365,6 +435,7 @@ func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error return response, nil } +//ListGroups returns a list group response or error func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { response := new(ListGroupsResponse) @@ -376,6 +447,7 @@ func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, er return response, nil } +//DescribeGroups returns a describe group response or error func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { response := new(DescribeGroupsResponse) @@ -387,6 +459,7 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups return response, nil } +//ApiVersions returns an api version response or error func (b
*Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { response := new(ApiVersionsResponse) @@ -398,6 +471,7 @@ func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, return response, nil } +//CreateTopics sends a create topic request and returns a create topic response func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) { response := new(CreateTopicsResponse) @@ -409,6 +483,7 @@ func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsRespon return response, nil } +//DeleteTopics sends a delete topic request and returns a delete topic response func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { response := new(DeleteTopicsResponse) @@ -420,6 +495,8 @@ func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsRespon return response, nil } +//CreatePartitions sends a create partition request and returns a create +//partitions response or error func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) { response := new(CreatePartitionsResponse) @@ -431,6 +508,8 @@ func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePart return response, nil } +//DeleteRecords sends a request to delete records and returns a delete record +//response or error func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { response := new(DeleteRecordsResponse) @@ -442,6 +521,7 @@ func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsRes return response, nil } +//DescribeAcls sends a describe acl request and returns a response or error func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) { response := new(DescribeAclsResponse) @@ -453,6 +533,7 @@ func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsRespon return response, nil } +//CreateAcls sends a create acl request and returns a response or error func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) { response := new(CreateAclsResponse) @@ -464,6 +545,7 @@ func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, er return response, nil } +//DeleteAcls sends a delete acl request and returns a response or error func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) { response := new(DeleteAclsResponse) @@ -475,6 +557,7 @@ func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, er return response, nil } +//InitProducerID sends an init producer request and returns a response or error func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) { response := new(InitProducerIDResponse) @@ -486,6 +569,8 @@ func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerID return response, nil } +//AddPartitionsToTxn sends a request to add partitions to a txn and returns +//a response or error func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) { response := new(AddPartitionsToTxnResponse) @@ -497,6 +582,8 @@ func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPar return response, nil } +//AddOffsetsToTxn sends a request to add offsets to txn and returns a response +//or error func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) { response :=
new(AddOffsetsToTxnResponse) @@ -508,6 +595,7 @@ func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsTo return response, nil } +//EndTxn sends a request to end txn and returns a response or error func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { response := new(EndTxnResponse) @@ -519,6 +607,8 @@ func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { return response, nil } +//TxnOffsetCommit sends a request to commit transaction offsets and returns +//a response or error func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) { response := new(TxnOffsetCommitResponse) @@ -530,6 +620,8 @@ func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCom return response, nil } +//DescribeConfigs sends a request to describe config and returns a response or +//error func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) { response := new(DescribeConfigsResponse) @@ -541,6 +633,7 @@ func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConf return response, nil } +//AlterConfigs sends a request to alter config and returns a response or error func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) { response := new(AlterConfigsResponse) @@ -552,6 +645,7 @@ func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsRespon return response, nil } +//DeleteGroups sends a request to delete groups and returns a response or error func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) { response := new(DeleteGroupsResponse) @@ -590,7 +684,7 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, requestTime := time.Now() bytes, err := b.conn.Write(buf) - b.updateOutgoingCommunicationMetrics(bytes) + b.updateOutgoingCommunicationMetrics(bytes) //TODO: should it be after error check if err != nil { return nil, err } @@ -610,7 +704,6 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { promise, err := b.send(req, res != nil) - if err != nil { return err } @@ -659,11 +752,11 @@ func (b *Broker) decode(pd packetDecoder, version int16) (err error) { } func (b *Broker) encode(pe packetEncoder, version int16) (err error) { - host, portstr, err := net.SplitHostPort(b.addr) if err != nil { return err } + port, err := strconv.Atoi(portstr) if err != nil { return err } @@ -691,6 +784,7 @@ func (b *Broker) encode(pe packetEncoder, version int16) (err error) { func (b *Broker) responseReceiver() { var dead error header := make([]byte, 8) + for response := range b.responses { if dead != nil { response.errors <- dead @@ -744,8 +838,20 @@ func (b *Broker) responseReceiver() { close(b.done) } -func (b *Broker) sendAndReceiveSASLPlainHandshake() error { - rb := &SaslHandshakeRequest{"PLAIN"} +func (b *Broker) authenticateViaSASL() error { + switch b.conf.Net.SASL.Mechanism { + case SASLTypeOAuth: + return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider) + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + return b.sendAndReceiveSASLSCRAMv1() + default: + return b.sendAndReceiveSASLPlainAuth() + } +} + +func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error { + rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version} + req := &request{correlationID:
b.correlationID, clientID: b.conf.ClientID, body: rb} buf, err := encode(req, b.conf.MetricRegistry) if err != nil { @@ -772,6 +878,7 @@ func (b *Broker) sendAndReceiveSASLPlainHandshake() error { Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) return err } + length := binary.BigEndian.Uint32(header[:4]) payload := make([]byte, length-4) n, err := io.ReadFull(b.conn, payload) @@ -779,18 +886,22 @@ func (b *Broker) sendAndReceiveSASLPlainHandshake() error { Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) return err } + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) res := &SaslHandshakeResponse{} + err = versionedDecode(payload, res, 0) if err != nil { Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) return err } + if res.Err != ErrNoError { Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) return res.Err } - Logger.Print("Successful SASL handshake") + + Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) return nil } @@ -814,12 +925,13 @@ func (b *Broker) sendAndReceiveSASLPlainHandshake() error { // of responding to bad credentials but thats how its being done today. func (b *Broker) sendAndReceiveSASLPlainAuth() error { if b.conf.Net.SASL.Handshake { - handshakeErr := b.sendAndReceiveSASLPlainHandshake() + handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, SASLHandshakeV0) if handshakeErr != nil { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) return handshakeErr } } + length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) authBytes := make([]byte, length+4) //4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) @@ -853,17 +965,245 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { return nil } +// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255 +// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876 +func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { + if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil { + return err + } + + token, err := provider.Token() + if err != nil { + return err + } + + requestTime := time.Now() + correlationID := b.correlationID + + bytesWritten, err := b.sendSASLOAuthBearerClientResponse(token, correlationID) + if err != nil { + return err + } + + b.updateOutgoingCommunicationMetrics(bytesWritten) + b.correlationID++ + + bytesRead, err := b.receiveSASLOAuthBearerServerResponse(correlationID) + if err != nil { + return err + } + + requestLatency := time.Since(requestTime) + b.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + + return nil +} + +func (b *Broker) sendAndReceiveSASLSCRAMv1() error { + if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil { + return err + } + + scramClient := b.conf.Net.SASL.SCRAMClient + if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { + return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + } + + msg, err := scramClient.Step("") + if err != nil { + return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + + } + + for !scramClient.Done() { + requestTime := time.Now() + correlationID := b.correlationID + bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg)) + if err != nil { + 
Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.updateOutgoingCommunicationMetrics(bytesWritten) + b.correlationID++ + challenge, err := b.receiveSaslAuthenticateResponse(correlationID) + if err != nil { + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime)) + msg, err = scramClient.Step(string(challenge)) + if err != nil { + Logger.Println("SASL authentication failed", err) + return err + } + } + + Logger.Println("SASL authentication succeeded") + return nil +} + +func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) { + rb := &SaslAuthenticateRequest{msg} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil { + return 0, err + } + + return b.conn.Write(buf) +} + +func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + _, err := io.ReadFull(b.conn, buf) + if err != nil { + return nil, err + } + + header := responseHeader{} + err = decode(buf, &header) + if err != nil { + return nil, err + } + + if header.correlationID != correlationID { + return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + _, err = io.ReadFull(b.conn, buf) + if err != nil { + return nil, err + } + + res := &SaslAuthenticateResponse{} + if err := versionedDecode(buf, res, 0); err != nil { + return nil, err + } + if res.Err != ErrNoError { + return nil, res.Err + } + return res.SaslAuthBytes, nil +} + +// Build SASL/OAUTHBEARER initial client response as described by RFC-7628 +// https://tools.ietf.org/html/rfc7628 +func buildClientInitialResponse(token *AccessToken) ([]byte, error) { + var ext string + + if token.Extensions != nil && len(token.Extensions) > 0 { + if _, ok := token.Extensions[SASLExtKeyAuth]; ok { + return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth) + } + ext = "\x01" + mapToString(token.Extensions, "=", "\x01") + } + + resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)) + + return resp, nil +} + +// mapToString returns a list of key-value pairs ordered by key. +// keyValSep separates the key from the value. elemSep separates each pair. 
+func mapToString(extensions map[string]string, keyValSep string, elemSep string) string { + buf := make([]string, 0, len(extensions)) + + for k, v := range extensions { + buf = append(buf, k+keyValSep+v) + } + + sort.Strings(buf) + + return strings.Join(buf, elemSep) +} + +func (b *Broker) sendSASLOAuthBearerClientResponse(token *AccessToken, correlationID int32) (int, error) { + initialResp, err := buildClientInitialResponse(token) + if err != nil { + return 0, err + } + + rb := &SaslAuthenticateRequest{initialResp} + + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil { + return 0, err + } + + return b.conn.Write(buf) +} + +func (b *Broker) receiveSASLOAuthBearerServerResponse(correlationID int32) (int, error) { + + buf := make([]byte, responseLengthSize+correlationIDSize) + + bytesRead, err := io.ReadFull(b.conn, buf) + if err != nil { + return bytesRead, err + } + + header := responseHeader{} + + err = decode(buf, &header) + if err != nil { + return bytesRead, err + } + + if header.correlationID != correlationID { + return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + + c, err := io.ReadFull(b.conn, buf) + bytesRead += c + if err != nil { + return bytesRead, err + } + + res := &SaslAuthenticateResponse{} + + if err := versionedDecode(buf, res, 0); err != nil { + return bytesRead, err + } + + if res.Err != ErrNoError { + return bytesRead, res.Err + } + + if len(res.SaslAuthBytes) > 0 { + Logger.Printf("Received SASL auth response: %s", res.SaslAuthBytes) + } + + return bytesRead, nil +} + func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { b.updateRequestLatencyMetrics(requestLatency) b.responseRate.Mark(1) + if b.brokerResponseRate != nil { b.brokerResponseRate.Mark(1) } + responseSize := int64(bytes) b.incomingByteRate.Mark(responseSize) if b.brokerIncomingByteRate != nil { b.brokerIncomingByteRate.Mark(responseSize) } + b.responseSize.Update(responseSize) if b.brokerResponseSize != nil { b.brokerResponseSize.Update(responseSize) @@ -873,9 +1213,11 @@ func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency ti func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { requestLatencyInMs := int64(requestLatency / time.Millisecond) b.requestLatency.Update(requestLatencyInMs) + if b.brokerRequestLatency != nil { b.brokerRequestLatency.Update(requestLatencyInMs) } + } func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { @@ -883,13 +1225,44 @@ func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { if b.brokerRequestRate != nil { b.brokerRequestRate.Mark(1) } + requestSize := int64(bytes) b.outgoingByteRate.Mark(requestSize) if b.brokerOutgoingByteRate != nil { b.brokerOutgoingByteRate.Mark(requestSize) } + b.requestSize.Update(requestSize) if b.brokerRequestSize != nil { b.brokerRequestSize.Update(requestSize) } + +} + +func (b *Broker) registerMetrics() { + b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate") + b.brokerRequestRate = b.registerMeter("request-rate") + b.brokerRequestSize = b.registerHistogram("request-size") + b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms") + b.brokerOutgoingByteRate = 
b.registerMeter("outgoing-byte-rate") + b.brokerResponseRate = b.registerMeter("response-rate") + b.brokerResponseSize = b.registerHistogram("response-size") +} + +func (b *Broker) unregisterMetrics() { + for _, name := range b.registeredMetrics { + b.conf.MetricRegistry.Unregister(name) + } +} + +func (b *Broker) registerMeter(name string) metrics.Meter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerHistogram(name string) metrics.Histogram { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry) } diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index 79be5ce5..b959b60e 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/Shopify/sarama/client.go @@ -46,6 +46,10 @@ type Client interface { // the partition leader. InSyncReplicas(topic string, partitionID int32) ([]int32, error) + // OfflineReplicas returns the set of all offline replica IDs for the given + // partition. Offline replicas are replicas which are offline + OfflineReplicas(topic string, partitionID int32) ([]int32, error) + // RefreshMetadata takes a list of topics and queries the cluster to refresh the // available metadata for those topics. If no topics are provided, it will refresh // metadata for all topics. @@ -288,7 +292,8 @@ func (client *client) Partitions(topic string) ([]int32, error) { partitions = client.cachedPartitions(topic, allPartitions) } - if partitions == nil { + // no partitions found after refresh metadata + if len(partitions) == 0 { return nil, ErrUnknownTopicOrPartition } @@ -373,6 +378,31 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, return dupInt32Slice(metadata.Isr), nil } +func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.OfflineReplicas), metadata.Err + } + return dupInt32Slice(metadata.OfflineReplicas), nil +} + func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { if client.Closed() { return nil, ErrClosedClient @@ -710,8 +740,11 @@ func (client *client) refreshMetadata() error { func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { retry := func(err error) error { if attemptsRemaining > 0 { + backoff := client.computeBackoff(attemptsRemaining) Logger.Printf("client/metadata retrying after %dms... 
(%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) + if backoff > 0 { + time.Sleep(backoff) + } return client.tryRefreshMetadata(topics, attemptsRemaining-1) } return err @@ -725,11 +758,12 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) } req := &MetadataRequest{Topics: topics} - if client.conf.Version.IsAtLeast(V0_10_0_0) { + if client.conf.Version.IsAtLeast(V1_0_0_0) { + req.Version = 5 + } else if client.conf.Version.IsAtLeast(V0_10_0_0) { req.Version = 1 } response, err := broker.GetMetadata(req) - switch err.(type) { case nil: allKnownMetaData := len(topics) == 0 @@ -789,7 +823,7 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo switch topic.Err { case ErrNoError: - break + // no-op case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results err = topic.Err continue @@ -799,7 +833,6 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo continue case ErrLeaderNotAvailable: // retry, but store partial partition results retry = true - break default: // don't retry, don't store partial results Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) err = topic.Err @@ -839,11 +872,21 @@ func (client *client) cachedController() *Broker { return client.brokers[client.controllerID] } +func (client *client) computeBackoff(attemptsRemaining int) time.Duration { + if client.conf.Metadata.Retry.BackoffFunc != nil { + maxRetries := client.conf.Metadata.Retry.Max + retries := maxRetries - attemptsRemaining + return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries) + } + return client.conf.Metadata.Retry.Backoff +} + func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) { retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { - Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) + backoff := client.computeBackoff(attemptsRemaining) + Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + time.Sleep(backoff) return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) } return nil, err @@ -897,3 +940,18 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin client.resurrectDeadBrokers() return retry(ErrOutOfBrokers) } + +// nopCloserClient embeds an existing Client, but disables +// the Close method (yet all other methods pass +// through unchanged). This is for use in larger structs +// where it is undesirable to close the client that was +// passed in by the caller. +type nopCloserClient struct { + Client +} + +// Close intercepts and purposely does not call the underlying +// client's Close() method. 
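The nopCloserClient above is what lets the FromClient constructors later in this diff (NewConsumerFromClient, NewConsumerGroupFromClient) drop their ownClient bookkeeping: ownership of the client stays with the caller. A small sketch of the resulting contract (the broker address is illustrative):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	// The caller created the client, so the caller closes it.
	defer client.Close()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		log.Fatal(err)
	}
	// Closing the consumer no longer closes the shared client: internally the
	// client was wrapped in a nopCloserClient, whose Close is a no-op.
	defer consumer.Close()
}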
+func (ncc *nopCloserClient) Close() error { + return nil +} diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go new file mode 100644 index 00000000..94b716e4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/compress.go @@ -0,0 +1,75 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "sync" + + "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4WriterPool = sync.Pool{ + New: func() interface{} { + return lz4.NewWriter(nil) + }, + } + + gzipWriterPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, + } +) + +func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var ( + err error + buf bytes.Buffer + writer *gzip.Writer + ) + if level != CompressionLevelDefault { + writer, err = gzip.NewWriterLevel(&buf, level) + if err != nil { + return nil, err + } + } else { + writer = gzipWriterPool.Get().(*gzip.Writer) + defer gzipWriterPool.Put(writer) + writer.Reset(&buf) + } + if _, err := writer.Write(data); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + case CompressionSnappy: + return snappy.Encode(data), nil + case CompressionLZ4: + writer := lz4WriterPool.Get().(*lz4.Writer) + defer lz4WriterPool.Put(writer) + + var buf bytes.Buffer + writer.Reset(&buf) + + if _, err := writer.Write(data); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + case CompressionZSTD: + return zstdCompressLevel(nil, data, level) + default: + return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index ed205229..dca186ea 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -10,6 +10,7 @@ import ( "time" "github.com/rcrowley/go-metrics" + "golang.org/x/net/proxy" ) const defaultClientID = "sarama" @@ -54,13 +55,26 @@ type Config struct { // Whether or not to use SASL authentication when connecting to the broker // (defaults to false). Enable bool + // SASLMechanism is the name of the enabled SASL mechanism. + // Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN). + Mechanism SASLMechanism // Whether or not to send the Kafka SASL handshake first if enabled // (defaults to true). You should only set this to false if you're using // a non-Kafka SASL proxy. Handshake bool - //username and password for SASL/PLAIN authentication + //username and password for SASL/PLAIN or SASL/SCRAM authentication User string Password string + // authz id used for SASL/SCRAM authentication + SCRAMAuthzID string + // SCRAMClient is a user provided implementation of a SCRAM + // client used to perform the SCRAM exchange with the server. + SCRAMClient SCRAMClient + // TokenProvider is a user-defined callback for generating + // access tokens for SASL/OAUTHBEARER auth. See the + // AccessTokenProvider interface docs for proper implementation + // guidelines. + TokenProvider AccessTokenProvider } // KeepAlive specifies the keep-alive period for an active network connection. @@ -72,6 +86,14 @@ type Config struct { // network being dialed. // If nil, a local address is automatically chosen. 
LocalAddr net.Addr + + Proxy struct { + // Whether or not to use a proxy when connecting to the broker + // (defaults to false). + Enable bool + // The proxy dialer to use when the proxy is enabled (defaults to nil). + Dialer proxy.Dialer + } } // Metadata is the namespace for metadata management properties used by the @@ -84,6 +106,10 @@ type Config struct { // How long to wait for leader election to occur before retrying // (default 250ms). Similar to the JVM's `retry.backoff.ms`. Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration } // How frequently to refresh the cluster metadata in the background. // Defaults to 10 minutes. Set to 0 to disable. Similar to @@ -171,6 +197,10 @@ type Config struct { // (default 100ms). Similar to the `retry.backoff.ms` setting of the // JVM producer. Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration } } @@ -229,6 +259,10 @@ type Config struct { // How long to wait after a failing to read from a partition before // trying again (default 2s). Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries int) time.Duration } // Fetch is the namespace for controlling how many bytes are retrieved by any @@ -313,6 +347,11 @@ type Config struct { Max int } } + + // IsolationLevel supports 2 modes: + // - use `ReadUncommitted` (default) to consume and return all messages in the message channel + // - use `ReadCommitted` to hide messages that are part of an aborted transaction + IsolationLevel IsolationLevel } // A user-provided string sent with every request to the brokers for logging, @@ -394,10 +433,10 @@ func NewConfig() *Config { // ConfigurationError if the specified values don't make sense. 
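The new Retry.BackoffFunc hooks and the Consumer.IsolationLevel field above compose as follows; a sketch assuming an exponential strategy (the specific numbers are arbitrary):

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()

	// Exponential backoff for metadata retries: 100ms, 200ms, 400ms, ...
	// As the field comment says, this takes precedence over Retry.Backoff.
	cfg.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		return time.Duration(1<<uint(retries)) * 100 * time.Millisecond
	}

	// Hide messages belonging to aborted transactions. Per the Validate
	// changes below, this requires Version >= V0_11_0_0.
	cfg.Version = sarama.V0_11_0_0
	cfg.Consumer.IsolationLevel = sarama.ReadCommitted

	_ = cfg // pass cfg to sarama.NewClient / sarama.NewConsumer as usual
}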
func (c *Config) Validate() error { // some configuration values should be warned on but not fail completely, do those first - if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { + if !c.Net.TLS.Enable && c.Net.TLS.Config != nil { Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") } - if c.Net.SASL.Enable == false { + if !c.Net.SASL.Enable { if c.Net.SASL.User != "" { Logger.Println("Net.SASL is disabled but a non-empty username was provided.") } @@ -454,10 +493,38 @@ func (c *Config) Validate() error { return ConfigurationError("Net.WriteTimeout must be > 0") case c.Net.KeepAlive < 0: return ConfigurationError("Net.KeepAlive must be >= 0") - case c.Net.SASL.Enable == true && c.Net.SASL.User == "": - return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") - case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": - return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + case c.Net.SASL.Enable: + if c.Net.SASL.Mechanism == "" { + c.Net.SASL.Mechanism = SASLTypePlaintext + } + + switch c.Net.SASL.Mechanism { + case SASLTypePlaintext: + if c.Net.SASL.User == "" { + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + } + if c.Net.SASL.Password == "" { + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + case SASLTypeOAuth: + if c.Net.SASL.TokenProvider == nil { + return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider") + } + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + if c.Net.SASL.User == "" { + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + } + if c.Net.SASL.Password == "" { + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + if c.Net.SASL.SCRAMClient == nil { + return ConfigurationError("A SCRAMClient instance must be provided to Net.SASL.SCRAMClient") + } + default: + msg := fmt.Sprintf("The SASL mechanism configuration is invalid. 
Possible values are `%s`, `%s`, `%s` and `%s`", + SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512) + return ConfigurationError(msg) + } } // validate the Admin values @@ -549,6 +616,13 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") case c.Consumer.Offsets.Retry.Max < 0: return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0") + case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted: + return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted") + } + + // validate IsolationLevel + if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0") } // validate the Consumer Group values diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go index 848cc9c9..5399d75c 100644 --- a/vendor/github.com/Shopify/sarama/config_resource_type.go +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -1,15 +1,22 @@ package sarama +//ConfigResourceType is a type for config resource type ConfigResourceType int8 // Taken from : // https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes const ( - UnknownResource ConfigResourceType = 0 - AnyResource ConfigResourceType = 1 - TopicResource ConfigResourceType = 2 - GroupResource ConfigResourceType = 3 - ClusterResource ConfigResourceType = 4 - BrokerResource ConfigResourceType = 5 + //UnknownResource constant type + UnknownResource ConfigResourceType = iota + //AnyResource constant type + AnyResource + //TopicResource constant type + TopicResource + //GroupResource constant type + GroupResource + //ClusterResource constant type + ClusterResource + //BrokerResource constant type + BrokerResource ) diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index 33d9d143..461026eb 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -6,17 +6,20 @@ import ( "sync" "sync/atomic" "time" + + "github.com/rcrowley/go-metrics" ) // ConsumerMessage encapsulates a Kafka message returned by the consumer. type ConsumerMessage struct { - Key, Value []byte - Topic string - Partition int32 - Offset int64 + Headers []*RecordHeader // only set if kafka is version 0.11+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp - Headers []*RecordHeader // only set if kafka is version 0.11+ + + Key, Value []byte + Topic string + Partition int32 + Offset int64 } // ConsumerError is what is provided to the user when an error occurs. @@ -43,13 +46,7 @@ func (ce ConsumerErrors) Error() string { // Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() // on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of // scope. -// -// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. -// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library -// builds on Sarama to add this support. 
For Kafka-based tracking (Kafka 0.9 and later), the -// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. type Consumer interface { - // Topics returns the set of available topics as retrieved from the cluster // metadata. This method is the same as Client.Topics(), and is provided for // convenience. @@ -75,13 +72,11 @@ type Consumer interface { } type consumer struct { - client Client - conf *Config - ownClient bool - - lock sync.Mutex + conf *Config children map[string]map[int32]*partitionConsumer brokerConsumers map[*Broker]*brokerConsumer + client Client + lock sync.Mutex } // NewConsumer creates a new consumer using the given broker addresses and configuration. @@ -90,18 +85,19 @@ func NewConsumer(addrs []string, config *Config) (Consumer, error) { if err != nil { return nil, err } - - c, err := NewConsumerFromClient(client) - if err != nil { - return nil, err - } - c.(*consumer).ownClient = true - return c, nil + return newConsumer(client) } // NewConsumerFromClient creates a new consumer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this consumer. func NewConsumerFromClient(client Client) (Consumer, error) { + // For clients passed in by the caller, ensure we don't + // call Close() on them. + cli := &nopCloserClient{client} + return newConsumer(cli) +} + +func newConsumer(client Client) (Consumer, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient @@ -118,10 +114,7 @@ func NewConsumerFromClient(client Client) (Consumer, error) { } func (c *consumer) Close() error { - if c.ownClient { - return c.client.Close() - } - return nil + return c.client.Close() } func (c *consumer) Topics() ([]string, error) { @@ -261,12 +254,11 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. // // To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of -// consumer tear-down & return imediately. Continue to loop, servicing the Messages channel until the teardown process +// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process // AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call // Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will // also drain the Messages channel, harvest all errors & return them once cleanup has completed. type PartitionConsumer interface { - // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. 
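The teardown contract described in the comment above, as a minimal consuming loop (the topic, partition, and broker address are illustrative):

package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	for {
		select {
		case msg := <-pc.Messages():
			log.Printf("offset %d: %s", msg.Offset, msg.Value)
		case <-signals:
			// Kick off teardown, then keep servicing Messages until the
			// channel closes, exactly as the comment above prescribes.
			pc.AsyncClose()
			for msg := range pc.Messages() {
				log.Printf("drained offset %d", msg.Offset)
			}
			return
		}
	}
}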
You must call @@ -298,22 +290,22 @@ type PartitionConsumer interface { type partitionConsumer struct { highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG - consumer *consumer - conf *Config - topic string - partition int32 + consumer *consumer + conf *Config broker *brokerConsumer messages chan *ConsumerMessage errors chan *ConsumerError feeder chan *FetchResponse trigger, dying chan none - responseResult error closeOnce sync.Once - - fetchSize int32 - offset int64 + topic string + partition int32 + responseResult error + fetchSize int32 + offset int64 + retries int32 } var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing @@ -332,12 +324,20 @@ func (child *partitionConsumer) sendError(err error) { } } +func (child *partitionConsumer) computeBackoff() time.Duration { + if child.conf.Consumer.Retry.BackoffFunc != nil { + retries := atomic.AddInt32(&child.retries, 1) + return child.conf.Consumer.Retry.BackoffFunc(int(retries)) + } + return child.conf.Consumer.Retry.Backoff +} + func (child *partitionConsumer) dispatcher() { for range child.trigger { select { case <-child.dying: close(child.trigger) - case <-time.After(child.conf.Consumer.Retry.Backoff): + case <-time.After(child.computeBackoff()): if child.broker != nil { child.consumer.unrefBrokerConsumer(child.broker) child.broker = nil @@ -421,12 +421,6 @@ func (child *partitionConsumer) AsyncClose() { func (child *partitionConsumer) Close() error { child.AsyncClose() - go withRecover(func() { - for range child.messages { - // drain - } - }) - var errors ConsumerErrors for err := range child.errors { errors = append(errors, err) @@ -451,17 +445,29 @@ feederLoop: for response := range child.feeder { msgs, child.responseResult = child.parseResponse(response) + if child.responseResult == nil { + atomic.StoreInt32(&child.retries, 0) + } + for i, msg := range msgs { messageSelect: select { + case <-child.dying: + child.broker.acks.Done() + continue feederLoop case child.messages <- msg: firstAttempt = true case <-expiryTicker.C: if !firstAttempt { child.responseResult = errTimedOut child.broker.acks.Done() + remainingLoop: for _, msg = range msgs[i:] { - child.messages <- msg + select { + case child.messages <- msg: + case <-child.dying: + break remainingLoop + } } child.broker.input <- child continue feederLoop @@ -487,9 +493,13 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe for _, msgBlock := range msgSet.Messages { for _, msg := range msgBlock.Messages() { offset := msg.Offset + timestamp := msg.Msg.Timestamp if msg.Msg.Version >= 1 { baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset offset += baseOffset + if msg.Msg.LogAppendTime { + timestamp = msgBlock.Msg.Timestamp + } } if offset < child.offset { continue @@ -500,43 +510,57 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe Key: msg.Msg.Key, Value: msg.Msg.Value, Offset: offset, - Timestamp: msg.Msg.Timestamp, + Timestamp: timestamp, BlockTimestamp: msgBlock.Msg.Timestamp, }) child.offset = offset + 1 } } if len(messages) == 0 { - return nil, ErrIncompleteResponse + child.offset++ } return messages, nil } func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { - var messages []*ConsumerMessage + messages := make([]*ConsumerMessage, 0, len(batch.Records)) + for _, rec := range batch.Records { offset := batch.FirstOffset + rec.OffsetDelta if 
offset < child.offset { continue } + timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta) + if batch.LogAppendTime { + timestamp = batch.MaxTimestamp + } messages = append(messages, &ConsumerMessage{ Topic: child.topic, Partition: child.partition, Key: rec.Key, Value: rec.Value, Offset: offset, - Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta), + Timestamp: timestamp, Headers: rec.Headers, }) child.offset = offset + 1 } if len(messages) == 0 { - child.offset += 1 + child.offset++ } return messages, nil } func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + var ( + metricRegistry = child.conf.MetricRegistry + consumerBatchSizeMetric metrics.Histogram + ) + + if metricRegistry != nil { + consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry) + } + block := response.GetBlock(child.topic, child.partition) if block == nil { return nil, ErrIncompleteResponse @@ -550,6 +574,9 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu if err != nil { return nil, err } + + consumerBatchSizeMetric.Update(int64(nRecs)) + if nRecs == 0 { partialTrailingMessage, err := block.isPartial() if err != nil { @@ -577,6 +604,12 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu child.fetchSize = child.conf.Consumer.Fetch.Default atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) + // abortedProducerIDs contains producer IDs whose messages should be ignored as uncommitted + // - producer IDs are added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset) + // - producer IDs are removed when the partitionConsumer iterates over an abort controlRecord, meaning the aborted transaction for this producer is over + abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions)) + abortedTransactions := block.getAbortedTransactions() + messages := []*ConsumerMessage{} for _, records := range block.RecordsSet { switch records.recordsType { @@ -588,14 +621,56 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu messages = append(messages, messageSetMessages...) 
case defaultRecords: + // Consume remaining abortedTransactions up to the last offset of the current batch + for _, txn := range abortedTransactions { + if txn.FirstOffset > records.RecordBatch.LastOffset() { + break + } + abortedProducerIDs[txn.ProducerID] = struct{}{} + // Pop abortedTransactions so that we never add them again + abortedTransactions = abortedTransactions[1:] + } + recordBatchMessages, err := child.parseRecords(records.RecordBatch) if err != nil { return nil, err } - if control, err := records.isControl(); err != nil || control { + + // Parse and commit offset but do not expose messages that are: + // - control records + // - part of an aborted transaction when set to `ReadCommitted` + + // control record + isControl, err := records.isControl() + if err != nil { + // It is unclear why the original code swallowed the error with a continue here. + // The safe bet is to ignore control messages on error when ReadUncommitted + // and to fail on them when ReadCommitted + if child.conf.Consumer.IsolationLevel == ReadCommitted { + return nil, err + } + continue + } + if isControl { + controlRecord, err := records.getControlRecord() + if err != nil { + return nil, err + } + + if controlRecord.Type == ControlRecordAbort { + delete(abortedProducerIDs, records.RecordBatch.ProducerID) + } continue } + // filter aborted transactions + if child.conf.Consumer.IsolationLevel == ReadCommitted { + _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID] + if records.RecordBatch.IsTransactional && isAborted { + continue + } + } + messages = append(messages, recordBatchMessages...) default: return nil, fmt.Errorf("unknown records type: %v", records.recordsType) @@ -605,15 +680,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return messages, nil } -// brokerConsumer - type brokerConsumer struct { consumer *consumer broker *Broker input chan *partitionConsumer newSubscriptions chan []*partitionConsumer - wait chan none subscriptions map[*partitionConsumer]none + wait chan none acks sync.WaitGroup refs int } @@ -635,14 +708,14 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { return bc } +// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer +// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks +// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give +// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available, +// so the main goroutine can block waiting for work if it has none. func (bc *brokerConsumer) subscriptionManager() { var buffer []*partitionConsumer - // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer - // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks - // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give - // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, - // so the main goroutine can block waiting for work if it has none. 
for { if len(buffer) > 0 { select { @@ -675,10 +748,10 @@ done: close(bc.newSubscriptions) } +//subscriptionConsumer ensures we will get nil right away if no new subscriptions are available func (bc *brokerConsumer) subscriptionConsumer() { <-bc.wait // wait for our first piece of work - // the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available for newSubscriptions := range bc.newSubscriptions { bc.updateSubscriptions(newSubscriptions) @@ -719,20 +792,20 @@ func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsu close(child.trigger) delete(bc.subscriptions, child) default: - break + // no-op } } } +//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed func (bc *brokerConsumer) handleResponses() { - // handles the response codes left for us by our subscriptions, and abandons ones that have been closed for child := range bc.subscriptions { result := child.responseResult child.responseResult = nil switch result { case nil: - break + // no-op case errTimedOut: Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", bc.broker.ID(), child.topic, child.partition) @@ -787,6 +860,9 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { MinBytes: bc.consumer.conf.Consumer.Fetch.Min, MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), } + if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) { + request.Version = 1 + } if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { request.Version = 2 } @@ -796,7 +872,7 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { } if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 4 - request.Isolation = ReadUncommitted // We don't support yet transactions. + request.Isolation = bc.consumer.conf.Consumer.IsolationLevel } for child := range bc.subscriptions { diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go index bb6a2c2b..8de95137 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/Shopify/sarama/consumer_group.go @@ -52,8 +52,7 @@ type ConsumerGroup interface { } type consumerGroup struct { - client Client - ownClient bool + client Client config *Config consumer Consumer @@ -73,20 +72,24 @@ func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerG return nil, err } - c, err := NewConsumerGroupFromClient(groupID, client) + c, err := newConsumerGroup(groupID, client) if err != nil { _ = client.Close() - return nil, err } - - c.(*consumerGroup).ownClient = true - return c, nil + return c, err } // NewConsumerGroupFromClient creates a new consumer group using the given client. It is still // necessary to call Close() on the underlying client when shutting down this consumer. // PLEASE NOTE: consumer groups can only re-use but not share clients. func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) { + // For clients passed in by the caller, ensure we don't + // call Close() on them. 
+ cli := &nopCloserClient{client} + return newConsumerGroup(groupID, cli) +} + +func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { config := client.Config() if !config.Version.IsAtLeast(V0_10_2_0) { return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0") @@ -131,10 +134,8 @@ func (c *consumerGroup) Close() (err error) { err = e } - if c.ownClient { - if e := c.client.Close(); e != nil { - err = e - } + if e := c.client.Close(); e != nil { + err = e } }) return @@ -162,14 +163,8 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co return err } - // Get coordinator - coordinator, err := c.client.Coordinator(c.groupID) - if err != nil { - return err - } - // Init session - sess, err := c.newSession(ctx, coordinator, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) + sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) if err == ErrClosedClient { return ErrClosedConsumerGroup } else if err != nil { @@ -183,7 +178,33 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co return sess.release(true) } -func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { +func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) { + select { + case <-c.closed: + return nil, ErrClosedConsumerGroup + case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + } + + if refreshCoordinator { + err := c.client.RefreshCoordinator(c.groupID) + if err != nil { + return c.retryNewSession(ctx, topics, handler, retries, true) + } + } + + return c.newSession(ctx, topics, handler, retries-1) +} + +func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { + coordinator, err := c.client.Coordinator(c.groupID) + if err != nil { + if retries <= 0 { + return nil, err + } + + return c.retryNewSession(ctx, topics, handler, retries, true) + } + // Join consumer group join, err := c.joinGroupRequest(coordinator, topics) if err != nil { @@ -195,19 +216,19 @@ func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, top c.memberID = join.MemberId case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately c.memberID = "" - return c.newSession(ctx, coordinator, topics, handler, retries) - case ErrRebalanceInProgress: // retry after backoff + return c.newSession(ctx, topics, handler, retries) + case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh if retries <= 0 { return nil, join.Err } - select { - case <-c.closed: - return nil, ErrClosedConsumerGroup - case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + return c.retryNewSession(ctx, topics, handler, retries, true) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, join.Err } - return c.newSession(ctx, coordinator, topics, handler, retries-1) + return c.retryNewSession(ctx, topics, handler, retries, false) default: return nil, join.Err } @@ -236,19 +257,19 @@ func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, top case ErrNoError: case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately c.memberID = "" - 
return c.newSession(ctx, coordinator, topics, handler, retries) - case ErrRebalanceInProgress: // retry after backoff + return c.newSession(ctx, topics, handler, retries) + case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh if retries <= 0 { return nil, sync.Err } - select { - case <-c.closed: - return nil, ErrClosedConsumerGroup - case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + return c.retryNewSession(ctx, topics, handler, retries, true) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, sync.Err } - return c.newSession(ctx, coordinator, topics, handler, retries-1) + return c.retryNewSession(ctx, topics, handler, retries, false) default: return nil, sync.Err } @@ -613,7 +634,7 @@ func (s *consumerGroupSession) release(withCleanup bool) (err error) { s.releaseOnce.Do(func() { if withCleanup { if e := s.handler.Cleanup(s); e != nil { - s.parent.handleError(err, "", -1) + s.parent.handleError(e, "", -1) err = e } } @@ -657,6 +678,12 @@ func (s *consumerGroupSession) heartbeatLoop() { resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID) if err != nil { _ = coordinator.Close() + + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + retries-- continue } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go index 4d86e930..f39a8711 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -5,7 +5,7 @@ import ( "strconv" ) -//ConsumerMetadataResponse holds the reponse for a consumer gorup meta data request +//ConsumerMetadataResponse holds the response for a consumer group metadata request type ConsumerMetadataResponse struct { Err KError Coordinator *Broker } diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go new file mode 100644 index 00000000..9b75ab53 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/control_record.go @@ -0,0 +1,72 @@ +package sarama + +//ControlRecordType represents the type of a control record +type ControlRecordType int + +const ( + //ControlRecordAbort is a control record for abort + ControlRecordAbort ControlRecordType = iota + //ControlRecordCommit is a control record for commit + ControlRecordCommit + //ControlRecordUnknown is a control record of unknown type + ControlRecordUnknown +) + +// Control records are returned as a record by fetchRequest. +// However, unlike "normal" records, they mean nothing application-wise; +// they only serve internal logic for supporting transactions. +type ControlRecord struct { + Version int16 + CoordinatorEpoch int32 + Type ControlRecordType +} + +func (cr *ControlRecord) decode(key, value packetDecoder) error { + var err error + cr.Version, err = value.getInt16() + if err != nil { + return err + } + + cr.CoordinatorEpoch, err = value.getInt32() + if err != nil { + return err + } + + // There is a version for the value part AND the key part, 
and it is not clear whether they are supposed to match. + // Either way, all these versions can only be 0 for now + cr.Version, err = key.getInt16() + if err != nil { + return err + } + + recordType, err := key.getInt16() + if err != nil { + return err + } + + switch recordType { + case 0: + cr.Type = ControlRecordAbort + case 1: + cr.Type = ControlRecordCommit + default: + // from JAVA implementation: + // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored + cr.Type = ControlRecordUnknown + } + return nil +} + +func (cr *ControlRecord) encode(key, value packetEncoder) { + value.putInt16(cr.Version) + value.putInt32(cr.CoordinatorEpoch) + key.putInt16(cr.Version) + + switch cr.Type { + case ControlRecordAbort: + key.putInt16(0) + case ControlRecordCommit: + key.putInt16(1) + } +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go index abd621c6..bb18204a 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_response.go +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -1,6 +1,9 @@ package sarama -import "time" +import ( + "fmt" + "time" +) type CreatePartitionsResponse struct { ThrottleTime time.Duration @@ -69,6 +72,14 @@ type TopicPartitionError struct { ErrMsg *string } +func (t *TopicPartitionError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + func (t *TopicPartitionError) encode(pe packetEncoder) error { pe.putInt16(int16(t.Err)) diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go index 66207e00..a493e02a 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_response.go +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -1,6 +1,9 @@ package sarama -import "time" +import ( + "fmt" + "time" +) type CreateTopicsResponse struct { Version int16 @@ -83,6 +86,14 @@ type TopicError struct { ErrMsg *string } +func (t *TopicError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + func (t *TopicError) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(t.Err)) diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go new file mode 100644 index 00000000..eaccbfc2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -0,0 +1,63 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "sync" + + "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var ( + err error + reader *gzip.Reader + readerIntf = gzipReaderPool.Get() + ) + if readerIntf != nil { + reader = readerIntf.(*gzip.Reader) + } else { + reader, err = gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + } + + defer gzipReaderPool.Put(reader) + + if err := reader.Reset(bytes.NewReader(data)); err != nil { + return nil, err + } + + return ioutil.ReadAll(reader) + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader := 
lz4ReaderPool.Get().(*lz4.Reader) + defer lz4ReaderPool.Put(reader) + + reader.Reset(bytes.NewReader(data)) + return ioutil.ReadAll(reader) + case CompressionZSTD: + return zstdDecompress(nil, data) + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go index 416a4fe6..ccb587b3 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_request.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -103,7 +103,7 @@ func (r *DescribeConfigsRequest) version() int16 { func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: - return V1_0_0_0 + return V1_1_0_0 case 2: return V2_0_0_0 default: diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go index 63fb6ea8..57372322 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_response.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -26,12 +26,12 @@ func (s ConfigSource) String() string { } const ( - SourceUnknown ConfigSource = 0 - SourceTopic ConfigSource = 1 - SourceDynamicBroker ConfigSource = 2 - SourceDynamicDefaultBroker ConfigSource = 3 - SourceStaticBroker ConfigSource = 4 - SourceDefault ConfigSource = 5 + SourceUnknown ConfigSource = iota + SourceTopic + SourceDynamicBroker + SourceDynamicDefaultBroker + SourceStaticBroker + SourceDefault ) type DescribeConfigsResponse struct { diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go index c11421d9..87a4c61c 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -157,6 +157,10 @@ const ( ErrFetchSessionIDNotFound KError = 70 ErrInvalidFetchSessionEpoch KError = 71 ErrListenerNotFound KError = 72 + ErrTopicDeletionDisabled KError = 73 + ErrFencedLeaderEpoch KError = 74 + ErrUnknownLeaderEpoch KError = 75 + ErrUnsupportedCompressionType KError = 76 ) func (err KError) Error() string { @@ -311,6 +315,14 @@ func (err KError) Error() string { return "kafka server: The fetch session epoch is invalid." case ErrListenerNotFound: return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed." + case ErrTopicDeletionDisabled: + return "kafka server: Topic deletion is disabled." + case ErrFencedLeaderEpoch: + return "kafka server: The leader epoch in the request is older than the epoch on the broker." + case ErrUnknownLeaderEpoch: + return "kafka server: The leader epoch in the request is newer than the epoch on the broker." + case ErrUnsupportedCompressionType: + return "kafka server: The requesting client does not support the compression type of given partition." } return fmt.Sprintf("Unknown error, how did this happen? 
Error code = %d", err) diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go index 462ab8af..4db9ddd3 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -36,8 +36,8 @@ type FetchRequest struct { type IsolationLevel int8 const ( - ReadUncommitted IsolationLevel = 0 - ReadCommitted IsolationLevel = 1 + ReadUncommitted IsolationLevel = iota + ReadCommitted ) func (r *FetchRequest) encode(pe packetEncoder) (err error) { diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index dade1c47..3afc1877 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -1,6 +1,7 @@ package sarama import ( + "sort" "time" ) @@ -33,7 +34,7 @@ type FetchResponseBlock struct { HighWaterMarkOffset int64 LastStableOffset int64 AbortedTransactions []*AbortedTransaction - Records *Records // deprecated: use FetchResponseBlock.Records + Records *Records // deprecated: use FetchResponseBlock.RecordsSet RecordsSet []*Records Partial bool } @@ -185,10 +186,23 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) return pe.pop() } +func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { + // I can't find any doc that guarantee the field `fetchResponse.AbortedTransactions` is ordered + // plus Java implementation use a PriorityQueue based on `FirstOffset`. I guess we have to order it ourself + at := b.AbortedTransactions + sort.Slice( + at, + func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset }, + ) + return at +} + type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock - ThrottleTime time.Duration - Version int16 // v1 requires 0.9+, v2 requires 0.10+ + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + Version int16 // v1 requires 0.9+, v2 requires 0.10+ + LogAppendTime bool + Timestamp time.Time } func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { @@ -355,10 +369,13 @@ func encodeKV(key, value Encoder) ([]byte, []byte) { return kb, vb } -func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { +func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) { frb := r.getOrCreateBlock(topic, partition) kb, vb := encodeKV(key, value) - msg := &Message{Key: kb, Value: vb} + if r.LogAppendTime { + timestamp = r.Timestamp + } + msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version} msgBlock := &MessageBlock{Msg: msg, Offset: offset} if len(frb.RecordsSet) == 0 { records := newLegacyRecords(&MessageSet{}) @@ -368,18 +385,94 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc set.Messages = append(set.Messages, msgBlock) } -func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { +func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) { frb := r.getOrCreateBlock(topic, partition) kb, vb := encodeKV(key, value) - rec := &Record{Key: kb, Value: vb, OffsetDelta: offset} if len(frb.RecordsSet) == 0 { - records := newDefaultRecords(&RecordBatch{Version: 2}) + records := 
newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) frb.RecordsSet = []*Records{&records} } batch := frb.RecordsSet[0].RecordBatch + rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} batch.addRecord(rec) } +// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, +// but instead of appending one record to an existing batch, it appends a new batch containing one record to the fetchResponse. +// Since transactions are handled at the batch level (the whole batch is either committed or aborted), use this to test transactions. +func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + + records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) + batch := &RecordBatch{ + Version: 2, + LogAppendTime: r.LogAppendTime, + FirstTimestamp: timestamp, + MaxTimestamp: r.Timestamp, + FirstOffset: offset, + LastOffsetDelta: 0, + ProducerID: producerID, + IsTransactional: isTransactional, + } + rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + records.RecordBatch = batch + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + + // batch + batch := &RecordBatch{ + Version: 2, + LogAppendTime: r.LogAppendTime, + FirstTimestamp: timestamp, + MaxTimestamp: r.Timestamp, + FirstOffset: offset, + LastOffsetDelta: 0, + ProducerID: producerID, + IsTransactional: true, + Control: true, + } + + // records + records := newDefaultRecords(nil) + records.RecordBatch = batch + + // record + crAbort := ControlRecord{ + Version: 0, + Type: recordType, + } + crKey := &realEncoder{raw: make([]byte, 4)} + crValue := &realEncoder{raw: make([]byte, 6)} + crAbort.encode(crKey, crValue) + rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0) +} + +func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { + r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{}) +} + +func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) { + r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{}) +} + +func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) { + // define controlRecord key and value + r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{}) +} + func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { frb := 
r.getOrCreateBlock(topic, partition) if len(frb.RecordsSet) == 0 { diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go index 0ab5cb5f..ff2ad206 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go +++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go @@ -3,8 +3,8 @@ package sarama type CoordinatorType int8 const ( - CoordinatorGroup CoordinatorType = 0 - CoordinatorTransaction CoordinatorType = 1 + CoordinatorGroup CoordinatorType = iota + CoordinatorTransaction ) type FindCoordinatorRequest struct { diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go index 44d5cc91..df8c91cc 100644 --- a/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/Shopify/sarama/message.go @@ -1,47 +1,52 @@ package sarama import ( - "bytes" - "compress/gzip" "fmt" - "io/ioutil" "time" +) - "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" +const ( + //CompressionNone no compression + CompressionNone CompressionCodec = iota + //CompressionGZIP compression using GZIP + CompressionGZIP + //CompressionSnappy compression using snappy + CompressionSnappy + //CompressionLZ4 compression using LZ4 + CompressionLZ4 + //CompressionZSTD compression using ZSTD + CompressionZSTD + + // The lowest 3 bits contain the compression codec used for the message + compressionCodecMask int8 = 0x07 + + // Bit 3 set for "LogAppend" timestamps + timestampTypeMask = 0x08 + + // CompressionLevelDefault is the constant to use in CompressionLevel + // to have the default compression level for any codec. The value is picked + // that we don't use any existing compression levels. + CompressionLevelDefault = -1000 ) // CompressionCodec represents the various compression codecs recognized by Kafka in messages. type CompressionCodec int8 -// The lowest 3 bits contain the compression codec used for the message -const compressionCodecMask int8 = 0x07 - -const ( - CompressionNone CompressionCodec = 0 - CompressionGZIP CompressionCodec = 1 - CompressionSnappy CompressionCodec = 2 - CompressionLZ4 CompressionCodec = 3 - CompressionZSTD CompressionCodec = 4 -) - func (cc CompressionCodec) String() string { return []string{ "none", "gzip", "snappy", "lz4", + "zstd", }[int(cc)] } -// CompressionLevelDefault is the constant to use in CompressionLevel -// to have the default compression level for any codec. The value is picked -// that we don't use any existing compression levels. 
-const CompressionLevelDefault = -1000 - +//Message is a kafka message type type Message struct { Codec CompressionCodec // codec used to compress the message contents CompressionLevel int // compression level + LogAppendTime bool // the used timestamp is LogAppendTime Key []byte // the message key, may be nil Value []byte // the message contents Set *MessageSet // the message set a message might wrap @@ -58,6 +63,9 @@ func (m *Message) encode(pe packetEncoder) error { pe.putInt8(m.Version) attributes := int8(m.Codec) & compressionCodecMask + if m.LogAppendTime { + attributes |= timestampTypeMask + } pe.putInt8(attributes) if m.Version >= 1 { @@ -77,53 +85,12 @@ func (m *Message) encode(pe packetEncoder) error { payload = m.compressedCache m.compressedCache = nil } else if m.Value != nil { - switch m.Codec { - case CompressionNone: - payload = m.Value - case CompressionGZIP: - var buf bytes.Buffer - var writer *gzip.Writer - if m.CompressionLevel != CompressionLevelDefault { - writer, err = gzip.NewWriterLevel(&buf, m.CompressionLevel) - if err != nil { - return err - } - } else { - writer = gzip.NewWriter(&buf) - } - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - case CompressionSnappy: - tmp := snappy.Encode(m.Value) - m.compressedCache = tmp - payload = m.compressedCache - case CompressionLZ4: - var buf bytes.Buffer - writer := lz4.NewWriter(&buf) - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - case CompressionZSTD: - c, err := zstdCompressLevel(nil, m.Value, m.CompressionLevel) - if err != nil { - return err - } - m.compressedCache = c - payload = m.compressedCache - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} + + payload, err = compress(m.Codec, m.CompressionLevel, m.Value) + if err != nil { + return err } + m.compressedCache = payload // Keep in mind the compressed payload size for metric gathering m.compressedSize = len(payload) } @@ -155,6 +122,7 @@ func (m *Message) decode(pd packetDecoder) (err error) { return err } m.Codec = CompressionCodec(attribute & compressionCodecMask) + m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask if m.Version == 1 { if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { @@ -179,59 +147,24 @@ func (m *Message) decode(pd packetDecoder) (err error) { switch m.Codec { case CompressionNone: // nothing to do - case CompressionGZIP: + default: if m.Value == nil { break } - reader, err := gzip.NewReader(bytes.NewReader(m.Value)) + + m.Value, err = decompress(m.Codec, m.Value) if err != nil { return err } - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - case CompressionSnappy: - if m.Value == nil { - break - } - if m.Value, err = snappy.Decode(m.Value); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - case CompressionLZ4: - if m.Value == nil { - break - } - reader := lz4.NewReader(bytes.NewReader(m.Value)) - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - case CompressionZSTD: - if m.Value == nil { - break - } - if m.Value, err = zstdDecompress(nil, m.Value); err != nil { - return err - } if err := 
m.decodeSet(); err != nil { return err } - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} } return pd.pop() } -// decodes a message set from a previousy encoded bulk-message +// decodes a message set from a previously encoded bulk-message func (m *Message) decodeSet() (err error) { pd := realDecoder{raw: m.Value} m.Set = &MessageSet{} diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go index 17dc4289..43239c4d 100644 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -10,7 +10,7 @@ func (r *MetadataRequest) encode(pe packetEncoder) error { if r.Version < 0 || r.Version > 5 { return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} } - if r.Version == 0 || len(r.Topics) > 0 { + if r.Version == 0 || r.Version == 5 || len(r.Topics) > 0 { err := pe.putArrayLength(len(r.Topics)) if err != nil { return err @@ -37,15 +37,8 @@ func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { if err != nil { return err } - if size < 0 { - return nil - } else { - topicCount := size - if topicCount == 0 { - return nil - } - - r.Topics = make([]string, topicCount) + if size > 0 { + r.Topics = make([]string, size) for i := range r.Topics { topic, err := pd.getString() if err != nil { diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go index c402d05f..b2d532e4 100644 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -296,7 +296,7 @@ foundTopic: return tmatch } -func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) { tmatch := r.AddTopic(topic, ErrNoError) var pmatch *PartitionMetadata @@ -316,6 +316,7 @@ foundPartition: pmatch.Leader = brokerID pmatch.Replicas = replicas pmatch.Isr = isr + pmatch.OfflineReplicas = offline pmatch.Err = err } diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go index 4869708e..90e5a87f 100644 --- a/vendor/github.com/Shopify/sarama/metrics.go +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -28,14 +28,6 @@ func getMetricNameForBroker(name string, broker *Broker) string { return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) } -func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter { - return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r) -} - -func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram { - return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r) -} - func getMetricNameForTopic(name string, topic string) string { // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy // cf. 
KAFKA-1902 and KAFKA-2337 diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index fe55200c..919d8bb0 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -2,6 +2,7 @@ package sarama import ( "fmt" + "strings" ) // TestReporter has methods matching go's testing.T to avoid importing @@ -66,6 +67,69 @@ func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { return res } +type MockListGroupsResponse struct { + groups map[string]string + t TestReporter +} + +func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { + return &MockListGroupsResponse{ + groups: make(map[string]string), + t: t, + } +} + +func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoder { + request := reqBody.(*ListGroupsRequest) + _ = request + response := &ListGroupsResponse{ + Groups: m.groups, + } + return response +} + +func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse { + m.groups[groupID] = protocolType + return m +} + +type MockDescribeGroupsResponse struct { + groups map[string]*GroupDescription + t TestReporter +} + +func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse { + return &MockDescribeGroupsResponse{ + t: t, + groups: make(map[string]*GroupDescription), + } +} + +func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse { + m.groups[groupID] = description + return m +} + +func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoder { + request := reqBody.(*DescribeGroupsRequest) + + response := &DescribeGroupsResponse{} + for _, requestedGroup := range request.Groups { + if group, ok := m.groups[requestedGroup]; ok { + response.Groups = append(response.Groups, group) + } else { + // Mimic real kafka - if a group doesn't exist, return + // an entry with state "Dead" + response.Groups = append(response.Groups, &GroupDescription{ + GroupId: requestedGroup, + State: "Dead", + }) + } + } + + return response +} + // MockMetadataResponse is a `MetadataResponse` builder. 
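//
// A usage sketch (editorial, not part of the diff): builders such as this
// one, and the group mocks added above, are installed on a MockBroker
// keyed by request name; t is assumed to be a *testing.T:
//
//	broker := sarama.NewMockBroker(t, 1)
//	defer broker.Close()
//	broker.SetHandlerByMap(map[string]sarama.MockResponse{
//		"MetadataRequest":   sarama.NewMockMetadataResponse(t).SetBroker(broker.Addr(), broker.BrokerID()),
//		"ListGroupsRequest": sarama.NewMockListGroupsResponse(t).AddGroup("example-group", "consumer"),
//	})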
type MockMetadataResponse struct { controllerID int32 @@ -111,17 +175,25 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { for addr, brokerID := range mmr.brokers { metadataResponse.AddBroker(addr, brokerID) } + + // Generate set of replicas + replicas := []int32{} + offlineReplicas := []int32{} + for _, brokerID := range mmr.brokers { + replicas = append(replicas, brokerID) + } + if len(metadataRequest.Topics) == 0 { for topic, partitions := range mmr.leaders { for partition, brokerID := range partitions { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } return metadataResponse } for _, topic := range metadataRequest.Topics { for partition, brokerID := range mmr.leaders[topic] { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } return metadataResponse @@ -549,10 +621,20 @@ func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder { req := reqBody.(*CreateTopicsRequest) - res := &CreateTopicsResponse{} + res := &CreateTopicsResponse{ + Version: req.Version, + } res.TopicErrors = make(map[string]*TopicError) - for topic, _ := range req.TopicDetails { + for topic := range req.TopicDetails { + if res.Version >= 1 && strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create topic with reserved prefix" + res.TopicErrors[topic] = &TopicError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + } res.TopicErrors[topic] = &TopicError{Err: ErrNoError} } return res @@ -590,7 +672,15 @@ func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder { res := &CreatePartitionsResponse{} res.TopicPartitionErrors = make(map[string]*TopicPartitionError) - for topic, _ := range req.TopicPartitions { + for topic := range req.TopicPartitions { + if strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create partition on topic with reserved prefix" + res.TopicPartitionErrors[topic] = &TopicPartitionError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + } res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError} } return res @@ -611,7 +701,7 @@ func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder { for topic, deleteRecordRequestTopic := range req.Topics { partitions := make(map[int32]*DeleteRecordsResponsePartition) - for partition, _ := range deleteRecordRequestTopic.PartitionOffsets { + for partition := range deleteRecordRequestTopic.PartitionOffsets { partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError} } res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions} @@ -631,16 +721,32 @@ func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder { req := reqBody.(*DescribeConfigsRequest) res := &DescribeConfigsResponse{} - var configEntries []*ConfigEntry - configEntries = append(configEntries, &ConfigEntry{Name: "my_topic", - Value: "my_topic", - ReadOnly: true, - Default: true, - Sensitive: false, - }) - for _, r := range req.Resources { - res.Resources = append(res.Resources, &ResourceResponse{Name: r.Name, Configs: configEntries}) + var configEntries []*ConfigEntry + switch r.Type { + case TopicResource: + 
configEntries = append(configEntries, + &ConfigEntry{Name: "max.message.bytes", + Value: "1000000", + ReadOnly: false, + Default: true, + Sensitive: false, + }, &ConfigEntry{Name: "retention.ms", + Value: "5000", + ReadOnly: false, + Default: false, + Sensitive: false, + }, &ConfigEntry{Name: "password", + Value: "12345", + ReadOnly: false, + Default: false, + Sensitive: true, + }) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + } } return res } @@ -706,10 +812,64 @@ func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder { return res } +type MockSaslAuthenticateResponse struct { + t TestReporter + kerror KError + saslAuthBytes []byte +} + +func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse { + return &MockSaslAuthenticateResponse{t: t} +} + +func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoder { + res := &SaslAuthenticateResponse{} + res.Err = msar.kerror + res.SaslAuthBytes = msar.saslAuthBytes + return res +} + +func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse { + msar.kerror = kerror + return msar +} + +func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse { + msar.saslAuthBytes = saslAuthBytes + return msar +} + type MockDeleteAclsResponse struct { t TestReporter } +type MockSaslHandshakeResponse struct { + enabledMechanisms []string + kerror KError + t TestReporter +} + +func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { + return &MockSaslHandshakeResponse{t: t} +} + +func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoder { + res := &SaslHandshakeResponse{} + res.Err = mshr.kerror + res.EnabledMechanisms = mshr.enabledMechanisms + return res +} + +func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse { + mshr.kerror = kerror + return mshr +} + +func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse { + mshr.enabledMechanisms = enabledMechanisms + return mshr +} + func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { return &MockDeleteAclsResponse{t: t} } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go index 1ec583e6..5732ed95 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -200,11 +200,11 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset i func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { partitions := r.blocks[topic] if partitions == nil { - return 0, "", errors.New("No such offset") + return 0, "", errors.New("no such offset") } block := partitions[partitionID] if block == nil { - return 0, "", errors.New("No such offset") + return 0, "", errors.New("no such offset") } return block.offset, block.metadata, nil } diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go index 8ea857f8..923972f2 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -120,6 +120,14 @@ func (om *offsetManager) Close() error { return nil } +func (om *offsetManager) computeBackoff(retries int) time.Duration { + if 
om.conf.Metadata.Retry.BackoffFunc != nil { + return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max) + } else { + return om.conf.Metadata.Retry.Backoff + } +} + func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) { broker, err := om.coordinator() if err != nil { @@ -161,10 +169,11 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri if retries <= 0 { return 0, "", block.Err } + backoff := om.computeBackoff(retries) select { case <-om.closing: return 0, "", block.Err - case <-time.After(om.conf.Metadata.Retry.Backoff): + case <-time.After(backoff): } return om.fetchInitialOffset(topic, partition, retries-1) default: @@ -324,7 +333,6 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest pom.handleError(err) case ErrOffsetsLoadInProgress: // nothing wrong but we didn't commit, we'll get it next time round - break case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the @@ -567,6 +575,6 @@ func (pom *partitionOffsetManager) handleError(err error) { func (pom *partitionOffsetManager) release() { pom.releaseOnce.Do(func() { - go close(pom.errors) + close(pom.errors) }) } diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go index 219ec5f2..bba0f7e1 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -81,7 +81,7 @@ func (ps *produceSet) add(msg *ProducerMessage) error { if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence { - return errors.New("Assertion failed: Message out of sequence added to a batch") + return errors.New("assertion failed: message out of sequence added to a batch") } // We are being conservative here to avoid having to prep encode the record size += maximumRecordOverhead @@ -222,9 +222,8 @@ func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): return true - // Would we overflow the size-limit of a compressed message-batch for this partition? - case ps.parent.conf.Producer.Compression != CompressionNone && - ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + // Would we overflow the size-limit of a message-batch for this partition? + case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: return true // Would we overflow simply in number of messages? 
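// Editorial sketch, not part of the diff: computeBackoff (above) consults
// the Metadata.Retry.BackoffFunc hook exposed by this Sarama release; a
// caller could install, for example, a linear backoff (values illustrative):
//
//	cfg := sarama.NewConfig()
//	cfg.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
//		// retries counts down toward zero, so later attempts wait longer
//		return time.Duration(maxRetries-retries+1) * 250 * time.Millisecond
//	}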
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go index cded308c..cdccfe32 100644 --- a/vendor/github.com/Shopify/sarama/record.go +++ b/vendor/github.com/Shopify/sarama/record.go @@ -6,10 +6,12 @@ import ( ) const ( + isTransactionalMask = 0x10 controlMask = 0x20 maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 ) +//RecordHeader stores key and value for a record header type RecordHeader struct { Key []byte Value []byte @@ -33,15 +35,16 @@ func (h *RecordHeader) decode(pd packetDecoder) (err error) { return nil } +//Record is kafka record type type Record struct { + Headers []*RecordHeader + Attributes int8 TimestampDelta time.Duration OffsetDelta int64 Key []byte Value []byte - Headers []*RecordHeader - - length varintLengthField + length varintLengthField } func (r *Record) encode(pe packetEncoder) error { diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go index 5444557f..5e7904d7 100644 --- a/vendor/github.com/Shopify/sarama/record_batch.go +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -1,14 +1,8 @@ package sarama import ( - "bytes" - "compress/gzip" "fmt" - "io/ioutil" "time" - - "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" ) const recordBatchOverhead = 49 @@ -42,6 +36,7 @@ type RecordBatch struct { Codec CompressionCodec CompressionLevel int Control bool + LogAppendTime bool LastOffsetDelta int32 FirstTimestamp time.Time MaxTimestamp time.Time @@ -50,11 +45,16 @@ type RecordBatch struct { FirstSequence int32 Records []*Record PartialTrailingRecord bool + IsTransactional bool compressedRecords []byte recordsLen int // uncompressed records size } +func (b *RecordBatch) LastOffset() int64 { + return b.FirstOffset + int64(b.LastOffsetDelta) +} + func (b *RecordBatch) encode(pe packetEncoder) error { if b.Version != 2 { return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} @@ -126,6 +126,8 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) { } b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask) b.Control = attributes&controlMask == controlMask + b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask + b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask if b.LastOffsetDelta, err = pd.getInt32(); err != nil { return err @@ -174,31 +176,9 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) { return err } - switch b.Codec { - case CompressionNone: - case CompressionGZIP: - reader, err := gzip.NewReader(bytes.NewReader(recBuffer)) - if err != nil { - return err - } - if recBuffer, err = ioutil.ReadAll(reader); err != nil { - return err - } - case CompressionSnappy: - if recBuffer, err = snappy.Decode(recBuffer); err != nil { - return err - } - case CompressionLZ4: - reader := lz4.NewReader(bytes.NewReader(recBuffer)) - if recBuffer, err = ioutil.ReadAll(reader); err != nil { - return err - } - case CompressionZSTD: - if recBuffer, err = zstdDecompress(nil, recBuffer); err != nil { - return err - } - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)} + recBuffer, err = decompress(b.Codec, recBuffer) + if err != nil { + return err } b.recordsLen = len(recBuffer) @@ -219,50 +199,8 @@ func (b *RecordBatch) encodeRecords(pe packetEncoder) error { } b.recordsLen = len(raw) - switch b.Codec { - case CompressionNone: - b.compressedRecords = raw - case CompressionGZIP: - var buf 
bytes.Buffer - var writer *gzip.Writer - if b.CompressionLevel != CompressionLevelDefault { - writer, err = gzip.NewWriterLevel(&buf, b.CompressionLevel) - if err != nil { - return err - } - } else { - writer = gzip.NewWriter(&buf) - } - if _, err := writer.Write(raw); err != nil { - return err - } - if err := writer.Close(); err != nil { - return err - } - b.compressedRecords = buf.Bytes() - case CompressionSnappy: - b.compressedRecords = snappy.Encode(raw) - case CompressionLZ4: - var buf bytes.Buffer - writer := lz4.NewWriter(&buf) - if _, err := writer.Write(raw); err != nil { - return err - } - if err := writer.Close(); err != nil { - return err - } - b.compressedRecords = buf.Bytes() - case CompressionZSTD: - c, err := zstdCompressLevel(nil, raw, b.CompressionLevel) - if err != nil { - return err - } - b.compressedRecords = c - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} - } - - return nil + b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw) + return err } func (b *RecordBatch) computeAttributes() int16 { @@ -270,6 +208,12 @@ func (b *RecordBatch) computeAttributes() int16 { if b.Control { attr |= controlMask } + if b.LogAppendTime { + attr |= timestampTypeMask + } + if b.IsTransactional { + attr |= isTransactionalMask + } return attr } diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go index 192f5927..fa8a21f3 100644 --- a/vendor/github.com/Shopify/sarama/records.go +++ b/vendor/github.com/Shopify/sarama/records.go @@ -192,3 +192,18 @@ func magicValue(pd packetDecoder) (int8, error) { return dec.getInt8() } + +func (r *Records) getControlRecord() (ControlRecord, error) { + if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 { + return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty") + } + + firstRecord := r.RecordBatch.Records[0] + controlRecord := ControlRecord{} + err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value}) + if err != nil { + return ControlRecord{}, err + } + + return controlRecord, nil +} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index 4d211a14..5ed8ca4d 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -20,51 +20,67 @@ type request struct { body protocolBody } -func (r *request) encode(pe packetEncoder) (err error) { +func (r *request) encode(pe packetEncoder) error { pe.push(&lengthField{}) pe.putInt16(r.body.key()) pe.putInt16(r.body.version()) pe.putInt32(r.correlationID) - err = pe.putString(r.clientID) + + err := pe.putString(r.clientID) if err != nil { return err } + err = r.body.encode(pe) if err != nil { return err } + return pe.pop() } func (r *request) decode(pd packetDecoder) (err error) { - var key int16 - if key, err = pd.getInt16(); err != nil { + key, err := pd.getInt16() + if err != nil { return err } - var version int16 - if version, err = pd.getInt16(); err != nil { + + version, err := pd.getInt16() + if err != nil { return err } - if r.correlationID, err = pd.getInt32(); err != nil { + + r.correlationID, err = pd.getInt32() + if err != nil { return err } + r.clientID, err = pd.getString() + if err != nil { + return err + } r.body = allocateBody(key, version) if r.body == nil { return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} } + return r.body.decode(pd, version) } -func decodeRequest(r io.Reader) 
(req *request, bytesRead int, err error) { - lengthBytes := make([]byte, 4) +func decodeRequest(r io.Reader) (*request, int, error) { + var ( + bytesRead int + lengthBytes = make([]byte, 4) + ) + if _, err := io.ReadFull(r, lengthBytes); err != nil { return nil, bytesRead, err } - bytesRead += len(lengthBytes) + bytesRead += len(lengthBytes) length := int32(binary.BigEndian.Uint32(lengthBytes)) + if length <= 4 || length > MaxRequestSize { return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} } @@ -73,12 +89,14 @@ func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { if _, err := io.ReadFull(r, encodedReq); err != nil { return nil, bytesRead, err } + bytesRead += len(encodedReq) - req = &request{} + req := &request{} if err := decode(encodedReq, req); err != nil { return nil, bytesRead, err } + return req, bytesRead, nil } @@ -140,6 +158,8 @@ func allocateBody(key, version int16) protocolBody { return &DescribeConfigsRequest{} case 33: return &AlterConfigsRequest{} + case 36: + return &SaslAuthenticateRequest{} case 37: return &CreatePartitionsRequest{} case 42: diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go index f3f4d27d..7a759185 100644 --- a/vendor/github.com/Shopify/sarama/response_header.go +++ b/vendor/github.com/Shopify/sarama/response_header.go @@ -2,6 +2,9 @@ package sarama import "fmt" +const responseLengthSize = 4 +const correlationIDSize = 4 + type responseHeader struct { length int32 correlationID int32 diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go index 7d5dc60d..1e0277ae 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -10,10 +10,7 @@ useful but comes with two caveats: it will generally be less efficient, and the depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. -To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic -consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the -https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 -and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. +To consume messages, use Consumer or Consumer-Group API. 
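+
+A minimal consumer-group sketch (an editorial illustration rather than
+upstream documentation; the broker address, group, and topic are
+placeholders, and handler is assumed to implement ConsumerGroupHandler):
+
+	cfg := NewConfig()
+	cfg.Version = V2_2_0_0 // consumer groups require at least V0_10_2_0
+	group, err := NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
+	if err != nil {
+		panic(err)
+	}
+	defer group.Close()
+	err = group.Consume(context.Background(), []string{"example-topic"}, handler)
+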
For lower-level needs, the Broker and Request/Response objects permit precise control over each connection and message sent on the wire; the Client provides higher-level metadata management that is shared between @@ -61,6 +58,14 @@ Producer related metrics: | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ +Consumer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | consumer-batch-size | histogram | Distribution of the number of messages in a batch | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + */ package sarama @@ -69,10 +74,29 @@ import ( "log" ) -// Logger is the instance of a StdLogger interface that Sarama writes connection -// management events to. By default it is set to discard all log messages via ioutil.Discard, -// but you can set it to redirect wherever you want. -var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) +var ( + // Logger is the instance of a StdLogger interface that Sarama writes connection + // management events to. By default it is set to discard all log messages via ioutil.Discard, + // but you can set it to redirect wherever you want. + Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + + // PanicHandler is called for recovering from panics spawned internally to the library (and thus + // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. + PanicHandler func(interface{}) + + // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying + // to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned + // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt + // to process. + MaxRequestSize int32 = 100 * 1024 * 1024 + + // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If + // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to + // protect the client from running out of memory. Please note that brokers do not have any natural limit on + // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers + // (see https://issues.apache.org/jira/browse/KAFKA-2063). + MaxResponseSize int32 = 100 * 1024 * 1024 +) // StdLogger is used to log error messages. type StdLogger interface { @@ -80,20 +104,3 @@ type StdLogger interface { Printf(format string, v ...interface{}) Println(v ...interface{}) } - -// PanicHandler is called for recovering from panics spawned internally to the library (and thus -// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. -var PanicHandler func(interface{}) - -// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. 
Trying -// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned -// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt -// to process. -var MaxRequestSize int32 = 100 * 1024 * 1024 - -// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If -// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to -// protect the client from running out of memory. Please note that brokers do not have any natural limit on -// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers -// (see https://issues.apache.org/jira/browse/KAFKA-2063). -var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go new file mode 100644 index 00000000..54c8b099 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go @@ -0,0 +1,29 @@ +package sarama + +type SaslAuthenticateRequest struct { + SaslAuthBytes []byte +} + +// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API +const APIKeySASLAuth = 36 + +func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error { + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) { + r.SaslAuthBytes, err = pd.getBytes() + return err +} + +func (r *SaslAuthenticateRequest) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateRequest) version() int16 { + return 0 +} + +func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go new file mode 100644 index 00000000..0038c3f3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go @@ -0,0 +1,44 @@ +package sarama + +type SaslAuthenticateResponse struct { + Err KError + ErrorMessage *string + SaslAuthBytes []byte +} + +func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putNullableString(r.ErrorMessage); err != nil { + return err + } + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.ErrorMessage, err = pd.getNullableString(); err != nil { + return err + } + + r.SaslAuthBytes, err = pd.getBytes() + + return err +} + +func (r *SaslAuthenticateResponse) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateResponse) version() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go index fbbc8947..fe5ba050 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go @@ -2,6 +2,7 @@ package sarama type SaslHandshakeRequest struct { Mechanism string + Version int16 } func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { @@ -25,7 +26,7 @@ func (r *SaslHandshakeRequest) key() int16 { } func (r *SaslHandshakeRequest) version() int16 { - 
return 0 + return r.Version } func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 7dcbf034..5c3f9944 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -159,6 +159,7 @@ var ( V2_0_0_0 = newKafkaVersion(2, 0, 0, 0) V2_0_1_0 = newKafkaVersion(2, 0, 1, 0) V2_1_0_0 = newKafkaVersion(2, 1, 0, 0) + V2_2_0_0 = newKafkaVersion(2, 2, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -181,11 +182,13 @@ var ( V2_0_0_0, V2_0_1_0, V2_1_0_0, + V2_2_0_0, } MinVersion = V0_8_2_0 - MaxVersion = V2_1_0_0 + MaxVersion = V2_2_0_0 ) +//ParseKafkaVersion parses and returns kafka version or error from a string func ParseKafkaVersion(s string) (KafkaVersion, error) { if len(s) < 5 { return MinVersion, fmt.Errorf("invalid version `%s`", s) diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 00000000..3d6f516a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 00000000..6929a9fd --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication fails. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of the standard library for a +// description of the network and address parameters. 
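+//
+// A usage sketch (editorial addition, not upstream documentation; the
+// proxy address below is a placeholder):
+//
+//	d := socks.NewDialer("tcp", "127.0.0.1:1080")
+//	conn, err := d.DialContext(context.Background(), "tcp", "example.com:80")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer conn.Close()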
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. 
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 00000000..4c5ad88b --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 00000000..0689bb6a --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. 
A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 00000000..553ead7c --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,134 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func FromEnvironment() Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. 
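+	//
+	// Editor's illustration (not upstream code): before calling FromURL, a
+	// consumer could register its own scheme via RegisterDialerType, e.g.:
+	//
+	//	proxy.RegisterDialerType("socks4", func(u *url.URL, forward proxy.Dialer) (proxy.Dialer, error) {
+	//		return newSocks4Dialer(u.Host, forward), nil // newSocks4Dialer is hypothetical
+	//	})
+	//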
+ if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 00000000..56345ec8 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,36 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + d.ProxyDial = func(_ context.Context, network string, address string) (net.Conn, error) { + return forward.Dial(network, address) + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +}
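[Editor's usage sketch to round out the newly vendored proxy package; not part of the diff. The proxy URL, credentials, and bypass list below are assumptions. As shown above, FromEnvironment falls back to Direct when ALL_PROXY is unset or unparsable, and wraps the result in a PerHost dialer when NO_PROXY is set.]

	package main

	import (
		"log"
		"os"

		"golang.org/x/net/proxy"
	)

	func main() {
		// Assumed environment: a local SOCKS5 server plus bypass rules.
		os.Setenv("ALL_PROXY", "socks5://user:secret@127.0.0.1:1080")
		// NO_PROXY is parsed by PerHost.AddFromString: IP literals, CIDR
		// ranges, zones ("*.internal.example"), and plain host names.
		os.Setenv("NO_PROXY", "localhost,10.0.0.0/8,*.internal.example")

		d := proxy.FromEnvironment()

		// Equivalent explicit construction, bypassing the environment:
		//	d, err := proxy.SOCKS5("tcp", "127.0.0.1:1080",
		//		&proxy.Auth{User: "user", Password: "secret"}, proxy.Direct)

		conn, err := d.Dial("tcp", "example.com:443") // dialed through the proxy
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
	}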