diff --git a/orderer/kafka/broker.go b/orderer/kafka/broker.go deleted file mode 100644 index 5e0eeb83cb6..00000000000 --- a/orderer/kafka/broker.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "fmt" - - "github.com/Shopify/sarama" -) - -// Broker allows the caller to get info on the cluster's partitions -type Broker interface { - GetOffset(cp ChainPartition, req *sarama.OffsetRequest) (int64, error) - Closeable -} - -type brokerImpl struct { - broker *sarama.Broker -} - -// Connects to the broker that handles all produce and consume -// requests for the given chain (Partition Leader Replica) -func newBroker(brokers []string, cp ChainPartition) (Broker, error) { - var candidateBroker, connectedBroker, leaderBroker *sarama.Broker - - // Connect to one of the given brokers - for _, hostPort := range brokers { - candidateBroker = sarama.NewBroker(hostPort) - if err := candidateBroker.Open(nil); err != nil { - logger.Warningf("Failed to connect to broker %s: %s", hostPort, err) - continue - } - if connected, err := candidateBroker.Connected(); !connected { - logger.Warningf("Failed to connect to broker %s: %s", hostPort, err) - continue - } - connectedBroker = candidateBroker - break - } - - if connectedBroker == nil { - return nil, fmt.Errorf("failed to connect to any of the given brokers (%v) for metadata request", brokers) - } - logger.Debugf("Connected to broker %s", connectedBroker.Addr()) - - // Get metadata for the topic that corresponds to this chain - metadata, err := connectedBroker.GetMetadata(&sarama.MetadataRequest{Topics: []string{cp.Topic()}}) - if err != nil { - return nil, fmt.Errorf("failed to get metadata for topic %s: %s", cp, err) - } - - // Get the leader broker for this chain partition - if (cp.Partition() >= 0) && (cp.Partition() < int32(len(metadata.Topics[0].Partitions))) { - leaderBrokerID := metadata.Topics[0].Partitions[cp.Partition()].Leader - // ATTN: If we ever switch to more than one partition per topic, the message - // below should be updated to print `cp` (i.e. Topic/Partition) instead of - // `cp.Topic()`. - logger.Debugf("[channel: %s] Leading broker: %d", cp.Topic(), leaderBrokerID) - for _, availableBroker := range metadata.Brokers { - if availableBroker.ID() == leaderBrokerID { - leaderBroker = availableBroker - break - } - } - } - - if leaderBroker == nil { - // ATTN: If we ever switch to more than one partition per topic, the message - // below should be updated to print `cp` (i.e. Topic/Partition) instead of - // `cp.Topic()`. 
- return nil, fmt.Errorf("[channel: %s] cannot find leader", cp.Topic()) - } - - // Connect to broker - if err := leaderBroker.Open(nil); err != nil { - return nil, fmt.Errorf("failed to connect to Kafka broker: %s", err) - } - if connected, err := leaderBroker.Connected(); !connected { - return nil, fmt.Errorf("failed to connect to Kafka broker: %s", err) - } - - return &brokerImpl{broker: leaderBroker}, nil -} - -// GetOffset retrieves the offset number that corresponds -// to the requested position in the log. -func (b *brokerImpl) GetOffset(cp ChainPartition, req *sarama.OffsetRequest) (int64, error) { - resp, err := b.broker.GetAvailableOffsets(req) - if err != nil { - return int64(-1), err - } - return resp.GetBlock(cp.Topic(), cp.Partition()).Offsets[0], nil -} - -// Close terminates the broker. -// This is invoked by the session deliverer's getOffset method. -func (b *brokerImpl) Close() error { - return b.broker.Close() -} diff --git a/orderer/kafka/broker_mock_test.go b/orderer/kafka/broker_mock_test.go deleted file mode 100644 index 7304fcdff36..00000000000 --- a/orderer/kafka/broker_mock_test.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "fmt" - "testing" - - "github.com/Shopify/sarama" -) - -type mockBrockerImpl struct { - brokerImpl - - mockBroker *sarama.MockBroker - handlerMap map[string]sarama.MockResponse -} - -func mockNewBroker(t *testing.T, cp ChainPartition) (Broker, error) { - mockBroker := sarama.NewMockBroker(t, testBrokerID) - handlerMap := make(map[string]sarama.MockResponse) - // The sarama mock package doesn't allow us to return an error - // for invalid offset requests, so we return an offset of -1. - // Note that the mock offset responses below imply a broker with - // newestOffset-1 blocks available. Therefore, if you are using this - // broker as part of a bigger test where you intend to consume blocks, - // make sure that the mockConsumer has been initialized accordingly - // (Set the 'offset' parameter to newestOffset-1.) - handlerMap["OffsetRequest"] = sarama.NewMockOffsetResponse(t). - SetOffset(cp.Topic(), cp.Partition(), sarama.OffsetOldest, testOldestOffset). - SetOffset(cp.Topic(), cp.Partition(), sarama.OffsetNewest, testNewestOffset) - mockBroker.SetHandlerByMap(handlerMap) - - broker := sarama.NewBroker(mockBroker.Addr()) - if err := broker.Open(nil); err != nil { - return nil, fmt.Errorf("cannot connect to mock broker: %s", err) - } - - return &mockBrockerImpl{ - brokerImpl: brokerImpl{ - broker: broker, - }, - mockBroker: mockBroker, - handlerMap: handlerMap, - }, nil -} - -func (mb *mockBrockerImpl) Close() error { - mb.mockBroker.Close() - return nil -} diff --git a/orderer/kafka/broker_test.go b/orderer/kafka/broker_test.go deleted file mode 100644 index b2fc9c092a2..00000000000 --- a/orderer/kafka/broker_test.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "testing" - - "github.com/Shopify/sarama" - "github.com/hyperledger/fabric/common/configtx/tool/provisional" -) - -func TestBrokerGetOffset(t *testing.T) { - t.Run("oldest", testBrokerGetOffsetFunc(sarama.OffsetOldest, testOldestOffset)) - t.Run("newest", testBrokerGetOffsetFunc(sarama.OffsetNewest, testNewestOffset)) -} - -func testBrokerGetOffsetFunc(given, expected int64) func(t *testing.T) { - cp := newChainPartition(provisional.TestChainID, rawPartition) - return func(t *testing.T) { - mb, _ := mockNewBroker(t, cp) - defer testClose(t, mb) - - ofs, _ := mb.GetOffset(cp, newOffsetReq(cp, given)) - if ofs != expected { - t.Fatalf("Expected offset %d, got %d instead", expected, ofs) - } - } -} - -func TestNewBrokerReturnsPartitionLeader(t *testing.T) { - cp := newChainPartition(provisional.TestChainID, rawPartition) - broker1 := sarama.NewMockBroker(t, 1) - broker2 := sarama.NewMockBroker(t, 2) - broker3 := sarama.NewMockBroker(t, 3) - defer func() { - broker2.Close() - broker3.Close() - }() - - // Use broker1 and broker2 as bootstrap brokers, but shutdown broker1 right away - broker1.Close() - - // Add expectation that broker2 will return a metadata response - // that identifies broker3 as the topic partition leader - broker2.SetHandlerByMap(map[string]sarama.MockResponse{ - "MetadataRequest": sarama.NewMockMetadataResponse(t). - SetBroker(broker1.Addr(), broker1.BrokerID()). - SetBroker(broker2.Addr(), broker2.BrokerID()). - SetBroker(broker3.Addr(), broker3.BrokerID()). - SetLeader(cp.Topic(), cp.Partition(), broker3.BrokerID()), - }) - - // Add expectation that broker3 responds to an offset request - broker3.SetHandlerByMap(map[string]sarama.MockResponse{ - "OffsetRequest": sarama.NewMockOffsetResponse(t). - SetOffset(cp.Topic(), cp.Partition(), sarama.OffsetOldest, testOldestOffset). - SetOffset(cp.Topic(), cp.Partition(), sarama.OffsetNewest, testNewestOffset), - }) - - // Get leader for the test chain partition - leaderBroker, _ := newBroker([]string{broker1.Addr(), broker2.Addr()}, cp) - - // Only broker3 will respond successfully to an offset request - offsetRequest := new(sarama.OffsetRequest) - offsetRequest.AddBlock(cp.Topic(), cp.Partition(), -1, 1) - if _, err := leaderBroker.GetOffset(cp, offsetRequest); err != nil { - t.Fatal("Expected leader broker to respond to request:", err) - } -} diff --git a/orderer/kafka/chain.go b/orderer/kafka/chain.go new file mode 100644 index 00000000000..bcc4ad6cc74 --- /dev/null +++ b/orderer/kafka/chain.go @@ -0,0 +1,446 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka + +import ( + "fmt" + "strconv" + "time" + + "github.com/Shopify/sarama" + "github.com/golang/protobuf/proto" + localconfig "github.com/hyperledger/fabric/orderer/localconfig" + "github.com/hyperledger/fabric/orderer/multichain" + cb "github.com/hyperledger/fabric/protos/common" + ab "github.com/hyperledger/fabric/protos/orderer" + "github.com/hyperledger/fabric/protos/utils" +) + +// Used for capturing metrics -- see processMessagesToBlocks +const ( + indexRecvError = iota + indexRecvPass + indexProcessConnectPass + indexProcessTimeToCutError + indexProcessTimeToCutPass + indexPprocessRegularError + indexProcessRegularPass + indexSendTimeToCutError + indexSendTimeToCutPass + indexExitChanPass +) + +func newChain(consenter commonConsenter, support multichain.ConsenterSupport, lastOffsetPersisted int64) (*chainImpl, error) { + lastCutBlockNumber := getLastCutBlockNumber(support.Height()) + logger.Infof("[channel: %s] Starting chain with last persisted offset %d and last recorded block %d", + support.ChainID(), lastOffsetPersisted, lastCutBlockNumber) + + return &chainImpl{ + consenter: consenter, + support: support, + channel: newChannel(support.ChainID(), defaultPartition), + lastOffsetPersisted: lastOffsetPersisted, + lastCutBlockNumber: lastCutBlockNumber, + halted: false, // Redundant as the default value for booleans is false but added for readability + exitChan: make(chan struct{}), + }, nil +} + +type chainImpl struct { + consenter commonConsenter + support multichain.ConsenterSupport + + channel channel + lastOffsetPersisted int64 + lastCutBlockNumber uint64 + + producer sarama.SyncProducer + parentConsumer sarama.Consumer + channelConsumer sarama.PartitionConsumer + + halted bool // For the Enqueue() calls + exitChan chan struct{} // For the Chain's Halt() method + + startCompleted bool // For testing +} + +// Start allocates the necessary resources for staying up to date with this +// Chain. Implements the multichain.Chain interface. Called by +// multichain.NewManagerImpl() which is invoked when the ordering process is +// launched, before the call to NewServer(). 
+func (chain *chainImpl) Start() {
+	var err error
+
+	// Set up the producer
+	chain.producer, err = setupProducerForChannel(chain.support.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.channel, chain.consenter.retryOptions())
+	if err != nil {
+		logger.Criticalf("[channel: %s] Cannot set up producer = %s", chain.channel.topic(), err)
+		close(chain.exitChan)
+		chain.halted = true
+		return
+	}
+	logger.Infof("[channel: %s] Producer set up successfully", chain.support.ChainID())
+
+	// Have the producer post the CONNECT message
+	if err = sendConnectMessage(chain.producer, chain.channel); err != nil {
+		logger.Criticalf("[channel: %s] Cannot post CONNECT message = %s", chain.channel.topic(), err)
+		close(chain.exitChan)
+		chain.halted = true
+		chain.producer.Close()
+		return
+	}
+	logger.Infof("[channel: %s] CONNECT message posted successfully", chain.channel.topic())
+
+	// Set up the consumer
+	chain.parentConsumer, chain.channelConsumer, err = setupConsumerForChannel(chain.support.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.channel, chain.lastOffsetPersisted+1)
+	if err != nil {
+		logger.Criticalf("[channel: %s] Cannot set up consumer = %s", chain.channel.topic(), err)
+		close(chain.exitChan)
+		chain.halted = true
+		chain.producer.Close()
+		return
+	}
+	logger.Infof("[channel: %s] Consumer set up successfully", chain.channel.topic())
+	go listenForErrors(chain.channelConsumer.Errors(), chain.exitChan)
+
+	// Keep up to date with the channel
+	go processMessagesToBlock(chain.support, chain.producer, chain.parentConsumer, chain.channelConsumer,
+		chain.channel, &chain.lastCutBlockNumber, &chain.halted, &chain.exitChan)
+
+	chain.startCompleted = true
+}
+
+// Halt frees the resources which were allocated for this Chain. Implements the
+// multichain.Chain interface.
+func (chain *chainImpl) Halt() {
+	select {
+	case <-chain.exitChan:
+		// This construct is useful because it allows Halt() to be called
+		// multiple times w/o panicking. Recall that a receive from a closed
+		// channel returns (the zero value) immediately.
+		logger.Warningf("[channel: %s] Halting of chain requested again", chain.support.ChainID())
+	default:
+		logger.Criticalf("[channel: %s] Halting of chain requested", chain.support.ChainID())
+		close(chain.exitChan)
+	}
+}
+
+// Enqueue accepts a message and returns true on acceptance, or false on
+// shutdown. Implements the multichain.Chain interface. Called by Broadcast.
+func (chain *chainImpl) Enqueue(env *cb.Envelope) bool {
+	if chain.halted {
+		logger.Warningf("[channel: %s] Will not enqueue because the chain has been halted", chain.support.ChainID())
+		return false
+	}
+
+	logger.Debugf("[channel: %s] Enqueueing envelope...", chain.support.ChainID())
+	marshaledEnv, err := utils.Marshal(env)
+	if err != nil {
+		return false
+	}
+	payload := utils.MarshalOrPanic(newRegularMessage(marshaledEnv))
+	message := newProducerMessage(chain.channel, payload)
+	if _, _, err := chain.producer.SendMessage(message); err != nil {
+		logger.Errorf("[channel: %s] cannot enqueue envelope = %s", chain.support.ChainID(), err)
+		return false
+	}
+	logger.Debugf("[channel: %s] Envelope enqueued successfully", chain.support.ChainID())
+
+	return !chain.halted // If chain.halted has been set to true while sending, we should return false
+}
+
+// processMessagesToBlock drains the Kafka consumer for the given channel, and
+// takes care of converting the stream of ordered messages into blocks for the
+// channel's ledger.
NOTE: May need to rethink the model here, and turn this +// into a method. For the time being, we optimize for testability. +func processMessagesToBlock(support multichain.ConsenterSupport, producer sarama.SyncProducer, + parentConsumer sarama.Consumer, channelConsumer sarama.PartitionConsumer, + chn channel, lastCutBlockNumber *uint64, haltedFlag *bool, exitChan *chan struct{}) ([]uint64, error) { + msg := new(ab.KafkaMessage) + var timer <-chan time.Time + + counts := make([]uint64, 10) // For metrics and tests + + defer func() { + _ = closeLoop(chn.topic(), producer, parentConsumer, channelConsumer, haltedFlag) + logger.Infof("[channel: %s] Closed producer/consumer threads for channel and exiting loop", chn.topic()) + }() + + for { + select { + case in := <-channelConsumer.Messages(): + if err := proto.Unmarshal(in.Value, msg); err != nil { + // This shouldn't happen, it should be filtered at ingress + logger.Criticalf("[channel: %s] Unable to unmarshal consumed message = %s", chn.topic(), err) + counts[indexRecvError]++ + } else { + logger.Debugf("[channel: %s] Successfully unmarshalled consumed message, offset is %d. Inspecting type...", chn.topic(), in.Offset) + counts[indexRecvPass]++ + } + switch msg.Type.(type) { + case *ab.KafkaMessage_Connect: + _ = processConnect(chn.topic()) + counts[indexProcessConnectPass]++ + case *ab.KafkaMessage_TimeToCut: + if err := processTimeToCut(msg.GetTimeToCut(), support, lastCutBlockNumber, &timer, in.Offset); err != nil { + logger.Warningf("[channel: %s] %s", chn.topic(), err) + logger.Criticalf("[channel: %s] Consenter for channel exiting", chn.topic()) + counts[indexProcessTimeToCutError]++ + return counts, err // TODO Revisit whether we should indeed stop processing the chain at this point + } + counts[indexProcessTimeToCutPass]++ + case *ab.KafkaMessage_Regular: + if err := processRegular(msg.GetRegular(), support, &timer, in.Offset, lastCutBlockNumber); err != nil { + logger.Warningf("[channel: %s] Error when processing incoming message of type REGULAR = %s", chn.topic(), err) + counts[indexPprocessRegularError]++ + } else { + counts[indexProcessRegularPass]++ + } + } + case <-timer: + if err := sendTimeToCut(producer, chn, (*lastCutBlockNumber)+1, &timer); err != nil { + logger.Errorf("[channel: %s] cannot post time-to-cut message = %s", chn.topic(), err) + // Do not return though + counts[indexSendTimeToCutError]++ + } else { + counts[indexSendTimeToCutPass]++ + } + case <-*exitChan: // When Halt() is called + logger.Warningf("[channel: %s] Consenter for channel exiting", chn.topic()) + counts[indexExitChanPass]++ + return counts, nil + } + } +} + +// Helper functions + +func closeLoop(channelName string, producer sarama.SyncProducer, parentConsumer sarama.Consumer, channelConsumer sarama.PartitionConsumer, haltedFlag *bool) []error { + var errs []error + + *haltedFlag = true + + err := channelConsumer.Close() + if err != nil { + logger.Errorf("[channel: %s] could not close channelConsumer cleanly = %s", channelName, err) + errs = append(errs, err) + } else { + logger.Debugf("[channel: %s] Closed the channel consumer", channelName) + } + + err = parentConsumer.Close() + if err != nil { + logger.Errorf("[channel: %s] could not close parentConsumer cleanly = %s", channelName, err) + errs = append(errs, err) + } else { + logger.Debugf("[channel: %s] Closed the parent consumer", channelName) + } + + err = producer.Close() + if err != nil { + logger.Errorf("[channel: %s] could not close producer cleanly = %s", channelName, err) + errs = 
append(errs, err) + } else { + logger.Debugf("[channel: %s] Closed the producer", channelName) + } + + return errs +} + +func getLastCutBlockNumber(blockchainHeight uint64) uint64 { + return blockchainHeight - 1 +} + +func getLastOffsetPersisted(metadataValue []byte, chainID string) int64 { + if metadataValue != nil { + // Extract orderer-related metadata from the tip of the ledger first + kafkaMetadata := &ab.KafkaMetadata{} + if err := proto.Unmarshal(metadataValue, kafkaMetadata); err != nil { + logger.Panicf("[channel: %s] Ledger may be corrupted:"+ + "cannot unmarshal orderer metadata in most recent block", chainID) + } + return kafkaMetadata.LastOffsetPersisted + } + return (sarama.OffsetOldest - 1) // default +} + +func listenForErrors(errChan <-chan *sarama.ConsumerError, exitChan <-chan struct{}) error { + select { + case <-exitChan: + return nil + case err := <-errChan: + logger.Error(err) + return err + } +} + +func newConnectMessage() *ab.KafkaMessage { + return &ab.KafkaMessage{ + Type: &ab.KafkaMessage_Connect{ + Connect: &ab.KafkaMessageConnect{ + Payload: nil, + }, + }, + } +} + +func newRegularMessage(payload []byte) *ab.KafkaMessage { + return &ab.KafkaMessage{ + Type: &ab.KafkaMessage_Regular{ + Regular: &ab.KafkaMessageRegular{ + Payload: payload, + }, + }, + } +} + +func newTimeToCutMessage(blockNumber uint64) *ab.KafkaMessage { + return &ab.KafkaMessage{ + Type: &ab.KafkaMessage_TimeToCut{ + TimeToCut: &ab.KafkaMessageTimeToCut{ + BlockNumber: blockNumber, + }, + }, + } +} + +func newProducerMessage(chn channel, pld []byte) *sarama.ProducerMessage { + return &sarama.ProducerMessage{ + Topic: chn.topic(), + Key: sarama.StringEncoder(strconv.Itoa(int(chn.partition()))), // TODO Consider writing an IntEncoder? + Value: sarama.ByteEncoder(pld), + } +} + +func processConnect(channelName string) error { + logger.Debugf("[channel: %s] It's a connect message - ignoring", channelName) + return nil +} + +func processRegular(regularMessage *ab.KafkaMessageRegular, support multichain.ConsenterSupport, timer *<-chan time.Time, receivedOffset int64, lastCutBlockNumber *uint64) error { + env := new(cb.Envelope) + if err := proto.Unmarshal(regularMessage.Payload, env); err != nil { + // This shouldn't happen, it should be filtered at ingress + return fmt.Errorf("unmarshal/%s", err) + } + batches, committers, ok := support.BlockCutter().Ordered(env) + logger.Debugf("[channel: %s] Ordering results: items in batch = %d, ok = %v", support.ChainID(), len(batches), ok) + if ok && len(batches) == 0 && *timer == nil { + *timer = time.After(support.SharedConfig().BatchTimeout()) + logger.Debugf("[channel: %s] Just began %s batch timer", support.ChainID(), support.SharedConfig().BatchTimeout().String()) + return nil + } + // If !ok, batches == nil, so this will be skipped + for i, batch := range batches { + // If more than one batch is produced, exactly 2 batches are produced. + // The receivedOffset for the first batch is one less than the supplied + // offset to this function. 
+ offset := receivedOffset - int64(len(batches)-i-1) + block := support.CreateNextBlock(batch) + encodedLastOffsetPersisted := utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: offset}) + support.WriteBlock(block, committers[i], encodedLastOffsetPersisted) + *lastCutBlockNumber++ + logger.Debugf("[channel: %s] Batch filled, just cut block %d - last persisted offset is now %d", support.ChainID(), *lastCutBlockNumber, offset) + } + if len(batches) > 0 { + *timer = nil + } + return nil +} + +func processTimeToCut(ttcMessage *ab.KafkaMessageTimeToCut, support multichain.ConsenterSupport, lastCutBlockNumber *uint64, timer *<-chan time.Time, receivedOffset int64) error { + ttcNumber := ttcMessage.GetBlockNumber() + logger.Debugf("[channel: %s] It's a time-to-cut message for block %d", support.ChainID(), ttcNumber) + if ttcNumber == *lastCutBlockNumber+1 { + *timer = nil + logger.Debugf("[channel: %s] Nil'd the timer", support.ChainID()) + batch, committers := support.BlockCutter().Cut() + if len(batch) == 0 { + return fmt.Errorf("got right time-to-cut message (for block %d),"+ + " no pending requests though; this might indicate a bug", *lastCutBlockNumber+1) + } + block := support.CreateNextBlock(batch) + encodedLastOffsetPersisted := utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: receivedOffset}) + support.WriteBlock(block, committers, encodedLastOffsetPersisted) + *lastCutBlockNumber++ + logger.Debugf("[channel: %s] Proper time-to-cut received, just cut block %d", support.ChainID(), *lastCutBlockNumber) + return nil + } else if ttcNumber > *lastCutBlockNumber+1 { + return fmt.Errorf("got larger time-to-cut message (%d) than allowed/expected (%d)"+ + " - this might indicate a bug", ttcNumber, *lastCutBlockNumber+1) + } + logger.Debugf("[channel: %s] Ignoring stale time-to-cut-message for block %d", support.ChainID(), ttcNumber) + return nil +} + +// Post a CONNECT message to the channel. This prevents the panicking that would +// occur if we were to set up a consumer and seek on a partition that hadn't +// been written to yet. +func sendConnectMessage(producer sarama.SyncProducer, channel channel) error { + logger.Infof("[channel: %s] Posting the CONNECT message...", channel.topic()) + payload := utils.MarshalOrPanic(newConnectMessage()) + message := newProducerMessage(channel, payload) + _, _, err := producer.SendMessage(message) + return err +} + +func sendTimeToCut(producer sarama.SyncProducer, channel channel, timeToCutBlockNumber uint64, timer *<-chan time.Time) error { + logger.Debugf("[channel: %s] Time-to-cut block %d timer expired", channel.topic(), timeToCutBlockNumber) + *timer = nil + payload := utils.MarshalOrPanic(newTimeToCutMessage(timeToCutBlockNumber)) + message := newProducerMessage(channel, payload) + _, _, err := producer.SendMessage(message) + return err +} + +// Sets up the listener/consumer for a channel. 
+func setupConsumerForChannel(brokers []string, brokerConfig *sarama.Config, channel channel, startFrom int64) (sarama.Consumer, sarama.PartitionConsumer, error) { + logger.Infof("[channel: %s] Setting up the consumer for this channel...", channel.topic()) + + parentConsumer, err := sarama.NewConsumer(brokers, brokerConfig) + if err != nil { + return nil, nil, err + } + logger.Debugf("[channel: %s] Created new parent consumer", channel.topic()) + + channelConsumer, err := parentConsumer.ConsumePartition(channel.topic(), channel.partition(), startFrom) + if err != nil { + _ = parentConsumer.Close() + return nil, nil, err + } + logger.Debugf("[channel: %s] Created new channel consumer", channel.topic()) + + return parentConsumer, channelConsumer, nil +} + +// Sets up the writer/producer for a channel. +func setupProducerForChannel(brokers []string, brokerConfig *sarama.Config, channel channel, retryOptions localconfig.Retry) (sarama.SyncProducer, error) { + var err error + var producer sarama.SyncProducer + + // This will be revised in: https://jira.hyperledger.org/browse/FAB-4136 + repeatTick := time.NewTicker(retryOptions.Period) + panicTick := time.NewTicker(retryOptions.Stop) + logger.Debugf("[channel: %s] Retrying every %s for a total of %s", channel.topic(), retryOptions.Period.String(), retryOptions.Stop.String()) + defer repeatTick.Stop() + defer panicTick.Stop() + +loop: + for { + select { + case <-panicTick.C: + return nil, err + case <-repeatTick.C: + logger.Debugf("[channel: %s] Connecting to Kafka cluster: %s", channel.topic(), brokers) + if producer, err = sarama.NewSyncProducer(brokers, brokerConfig); err == nil { + break loop + } + } + } + + return producer, err +} diff --git a/orderer/kafka/chain_partition.go b/orderer/kafka/chain_partition.go deleted file mode 100644 index daaa06d3144..00000000000 --- a/orderer/kafka/chain_partition.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import "fmt" - -const rawPartition = 0 - -// ChainPartition identifies the Kafka partition the orderer interacts with. -type ChainPartition interface { - Topic() string - Partition() int32 - fmt.Stringer -} - -type chainPartitionImpl struct { - tpc string - prt int32 -} - -// Returns a new chain partition for a given chain ID and partition. -func newChainPartition(chainID string, partition int32) ChainPartition { - return &chainPartitionImpl{ - tpc: fmt.Sprintf("%s", chainID), - prt: partition, - } -} - -// Topic returns the Kafka topic of this chain partition. -func (cp *chainPartitionImpl) Topic() string { - return cp.tpc -} - -// Partition returns the Kafka partition of this chain partition. -func (cp *chainPartitionImpl) Partition() int32 { - return cp.prt -} - -// String returns a string identifying the chain partition. 
-func (cp *chainPartitionImpl) String() string { - return fmt.Sprintf("%s/%d", cp.tpc, cp.prt) -} diff --git a/orderer/kafka/chain_partition_test.go b/orderer/kafka/chain_partition_test.go deleted file mode 100644 index 152439e5ee6..00000000000 --- a/orderer/kafka/chain_partition_test.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "fmt" - "strings" - "testing" - - "github.com/hyperledger/fabric/common/configtx/tool/provisional" -) - -func TestChainPartition(t *testing.T) { - cp := newChainPartition(provisional.TestChainID, rawPartition) - - expectedTopic := fmt.Sprintf("%s", provisional.TestChainID) - actualTopic := cp.Topic() - if strings.Compare(expectedTopic, actualTopic) != 0 { - t.Fatalf("Got the wrong topic, expected %s, got %s instead", expectedTopic, actualTopic) - } - - expectedPartition := int32(rawPartition) - actualPartition := cp.Partition() - if actualPartition != expectedPartition { - t.Fatalf("Got the wrong partition, expected %d, got %d instead", expectedPartition, actualPartition) - } -} diff --git a/orderer/kafka/chain_test.go b/orderer/kafka/chain_test.go new file mode 100644 index 00000000000..7b7c5268273 --- /dev/null +++ b/orderer/kafka/chain_test.go @@ -0,0 +1,1139 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka + +import ( + "fmt" + "testing" + "time" + + "github.com/Shopify/sarama" + "github.com/Shopify/sarama/mocks" + mockconfig "github.com/hyperledger/fabric/common/mocks/config" + mockblockcutter "github.com/hyperledger/fabric/orderer/mocks/blockcutter" + mockmultichain "github.com/hyperledger/fabric/orderer/mocks/multichain" + cb "github.com/hyperledger/fabric/protos/common" + ab "github.com/hyperledger/fabric/protos/orderer" + "github.com/hyperledger/fabric/protos/utils" + "github.com/stretchr/testify/assert" +) + +var hitBranch = 50 * time.Millisecond + +func TestChain(t *testing.T) { + oldestOffset := int64(0) + newestOffset := int64(5) + message := sarama.StringEncoder("messageFoo") + + mockChannel := newChannel("channelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(mockBroker.Addr(), mockBroker.BrokerID()). + SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()), + "ProduceRequest": sarama.NewMockProduceResponse(t). + SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError), + "OffsetRequest": sarama.NewMockOffsetResponse(t). + SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset). + SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). 
+			SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
+	})
+
+	mockSupport := &mockmultichain.ConsenterSupport{
+		ChainIDVal:      mockChannel.topic(),
+		HeightVal:       uint64(3),
+		SharedConfigVal: &mockconfig.Orderer{KafkaBrokersVal: []string{mockBroker.Addr()}},
+	}
+
+	t.Run("New", func(t *testing.T) {
+		_, err := newChain(mockConsenter, mockSupport, newestOffset-1)
+		assert.NoError(t, err, "Expected newChain to return without errors")
+	})
+
+	t.Run("Start", func(t *testing.T) {
+		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1) // -1 because we haven't set the CONNECT message yet
+		chain.Start()
+		assert.Equal(t, true, chain.startCompleted, "Expected chain.startCompleted flag to be set to true")
+		assert.Equal(t, false, chain.halted, "Expected chain.halted flag to be set to false")
+		close(chain.exitChan)
+	})
+
+	t.Run("Halt", func(t *testing.T) {
+		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1)
+		chain.Start()
+		chain.Halt()
+		_, ok := <-chain.exitChan
+		assert.Equal(t, false, ok, "Expected chain.exitChan to be closed")
+		time.Sleep(50 * time.Millisecond) // Let closeLoop() do its thing -- TODO Hacky, revise approach
+		assert.Equal(t, true, chain.halted, "Expected chain.halted flag to be set to true")
+	})
+
+	t.Run("DoubleHalt", func(t *testing.T) {
+		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1)
+		chain.Start()
+		chain.Halt()
+
+		assert.NotPanics(t, func() { chain.Halt() }, "Calling Halt() more than once shouldn't panic")
+
+		_, ok := <-chain.exitChan
+		assert.Equal(t, false, ok, "Expected chain.exitChan to be closed")
+	})
+
+	t.Run("StartWithProducerForChannelError", func(t *testing.T) {
+		mockSupportCopy := *mockSupport
+		mockSupportCopy.SharedConfigVal = &mockconfig.Orderer{KafkaBrokersVal: []string{}}
+
+		chain, _ := newChain(mockConsenter, &mockSupportCopy, newestOffset-1)
+
+		chain.Start()
+
+		assert.Equal(t, false, chain.startCompleted, "Expected chain.startCompleted flag to be set to false")
+		assert.Equal(t, true, chain.halted, "Expected chain.halted flag to be set to true")
+		_, ok := <-chain.exitChan
+		assert.Equal(t, false, ok, "Expected chain.exitChan to be closed")
+	})
+
+	t.Run("StartWithConnectMessageError", func(t *testing.T) {
+		mockBrokerConfigCopy := *mockBrokerConfig
+		mockBrokerConfigCopy.Net.ReadTimeout = 5 * time.Millisecond
+		mockBrokerConfigCopy.Consumer.Retry.Backoff = 5 * time.Millisecond
+		mockBrokerConfigCopy.Metadata.Retry.Max = 1
+
+		mockConsenterCopy := newMockConsenter(&mockBrokerConfigCopy, mockLocalConfig.General.TLS, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version)
+
+		chain, _ := newChain(mockConsenterCopy, mockSupport, newestOffset-1)
+
+		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
+			"MetadataRequest": sarama.NewMockMetadataResponse(t).
+				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
+				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
+			"ProduceRequest": sarama.NewMockProduceResponse(t).
+				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotLeaderForPartition),
+			"OffsetRequest": sarama.NewMockOffsetResponse(t).
+				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
+				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
+			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
+				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
+		})
+
+		chain.Start()
+
+		assert.Equal(t, false, chain.startCompleted, "Expected chain.startCompleted flag to be set to false")
+		assert.Equal(t, true, chain.halted, "Expected chain.halted flag to be set to true")
+		_, ok := <-chain.exitChan
+		assert.Equal(t, false, ok, "Expected chain.exitChan to be closed")
+	})
+
+	t.Run("StartWithConsumerForChannelError", func(t *testing.T) {
+		mockBrokerConfigCopy := *mockBrokerConfig
+		mockBrokerConfigCopy.Net.ReadTimeout = 5 * time.Millisecond
+		mockBrokerConfigCopy.Consumer.Retry.Backoff = 5 * time.Millisecond
+		mockBrokerConfigCopy.Metadata.Retry.Max = 1
+
+		mockConsenterCopy := newMockConsenter(&mockBrokerConfigCopy, mockLocalConfig.General.TLS, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version)
+
+		chain, _ := newChain(mockConsenterCopy, mockSupport, newestOffset) // Provide an out-of-range offset
+
+		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
+			"MetadataRequest": sarama.NewMockMetadataResponse(t).
+				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
+				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
+			"ProduceRequest": sarama.NewMockProduceResponse(t).
+				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError),
+			"OffsetRequest": sarama.NewMockOffsetResponse(t).
+				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
+				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
+			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
+				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
+		})
+
+		chain.Start()
+
+		assert.Equal(t, false, chain.startCompleted, "Expected chain.startCompleted flag to be set to false")
+		assert.Equal(t, true, chain.halted, "Expected chain.halted flag to be set to true")
+		_, ok := <-chain.exitChan
+		assert.Equal(t, false, ok, "Expected chain.exitChan to be closed")
+	})
+
+	t.Run("Enqueue", func(t *testing.T) {
+		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1)
+		chain.Start()
+
+		assert.Equal(t, true, chain.Enqueue(newMockEnvelope("fooMessage")), "Expected Enqueue call to return true")
+
+		chain.Halt()
+	})
+
+	t.Run("EnqueueIfHalted", func(t *testing.T) {
+		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1)
+		chain.Start()
+
+		chain.Halt()
+		time.Sleep(50 * time.Millisecond) // Let closeLoop() do its thing -- TODO Hacky, revise approach
+		assert.Equal(t, true, chain.halted, "Expected chain.halted flag to be set to true")
+
+		assert.Equal(t, false, chain.Enqueue(newMockEnvelope("fooMessage")), "Expected Enqueue call to return false")
+	})
+
+	t.Run("EnqueueError", func(t *testing.T) {
+		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1)
+		chain.Start()
+
+		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
+			"MetadataRequest": sarama.NewMockMetadataResponse(t).
+				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
+				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
+			"ProduceRequest": sarama.NewMockProduceResponse(t).
+				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotLeaderForPartition),
+			"OffsetRequest": sarama.NewMockOffsetResponse(t).
+				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
+ SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). + SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message), + }) + + assert.Equal(t, false, chain.Enqueue(newMockEnvelope("fooMessage")), "Expected Enqueue call to return false") + }) +} + +func TestCloseLoop(t *testing.T) { + startFrom := int64(3) + oldestOffset := int64(0) + newestOffset := int64(5) + + mockChannel := newChannel("channelFoo", defaultPartition) + message := sarama.StringEncoder("messageFoo") + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(mockBroker.Addr(), mockBroker.BrokerID()). + SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()), + "OffsetRequest": sarama.NewMockOffsetResponse(t). + SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset). + SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). + SetMessage(mockChannel.topic(), mockChannel.partition(), startFrom, message), + }) + + t.Run("Proper", func(t *testing.T) { + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + parentConsumer, channelConsumer, err := setupConsumerForChannel([]string{mockBroker.Addr()}, mockBrokerConfig, mockChannel, startFrom) + assert.NoError(t, err, "Expected no error when setting up the consumer") + + haltedFlag := false + + errs := closeLoop(mockChannel.topic(), producer, parentConsumer, channelConsumer, &haltedFlag) + + assert.Len(t, errs, 0, "Expected zero errors") + assert.Equal(t, true, haltedFlag, "Expected halted flag to be set to true") + + assert.Panics(t, func() { + channelConsumer.Close() + }) + + assert.NotPanics(t, func() { + parentConsumer.Close() + }) + + // TODO For some reason this panic cannot be captured by the `assert` + // test framework. Not a dealbreaker but need to investigate further. + /* assert.Panics(t, func() { + producer.Close() + }) */ + }) + + t.Run("ChannelConsumerError", func(t *testing.T) { + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + // Unlike all other tests in this file, forcing an error on the + // channelConsumer.Close() call is more easily achieved using the mock + // Consumer. Thus we bypass the call to `newConsumer` and do + // type-casting. 
+ mockParentConsumer := mocks.NewConsumer(t, nil) + mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), startFrom).YieldError(sarama.ErrOutOfBrokers) + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), startFrom) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + haltedFlag := false + + errs := closeLoop(mockChannel.topic(), producer, mockParentConsumer, mockChannelConsumer, &haltedFlag) + + assert.Len(t, errs, 1, "Expected 1 error returned") + assert.Equal(t, true, haltedFlag, "Expected halted flag to be set to true") + + assert.NotPanics(t, func() { + mockChannelConsumer.Close() + }) + + assert.NotPanics(t, func() { + mockParentConsumer.Close() + }) + }) +} + +func TestGetLastCutBlockNumber(t *testing.T) { + testCases := []struct { + name string + input uint64 + expected uint64 + }{ + {"Proper", uint64(2), uint64(1)}, + {"Zero", uint64(1), uint64(0)}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, getLastCutBlockNumber(tc.input)) + }) + } +} + +func TestGetLastOffsetPersisted(t *testing.T) { + mockChannel := newChannel("channelFoo", defaultPartition) + mockMetadata := &cb.Metadata{Value: utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: int64(5)})} + + testCases := []struct { + name string + md []byte + expected int64 + panics bool + }{ + {"Proper", mockMetadata.Value, int64(5), false}, + {"Empty", nil, sarama.OffsetOldest - 1, false}, + {"Panics", tamperBytes(mockMetadata.Value), sarama.OffsetOldest - 1, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if !tc.panics { + assert.Equal(t, tc.expected, getLastOffsetPersisted(tc.md, mockChannel.String())) + } else { + assert.Panics(t, func() { + getLastOffsetPersisted(tc.md, mockChannel.String()) + }, "Expected getLastOffsetPersisted call to panic") + } + }) + } +} + +func TestListenForErrors(t *testing.T) { + mockChannel := newChannel("mockChannelFoo", defaultPartition) + errChan := make(chan *sarama.ConsumerError, 1) + + exitChan1 := make(chan struct{}) + close(exitChan1) + assert.Nil(t, listenForErrors(errChan, exitChan1), "Expected listenForErrors call to return nil") + + exitChan2 := make(chan struct{}) + errChan <- &sarama.ConsumerError{ + Topic: mockChannel.topic(), + Partition: mockChannel.partition(), + Err: fmt.Errorf("foo"), + } + assert.NotNil(t, listenForErrors(errChan, exitChan2), "Expected listenForErrors call to return an error") +} + +func TestProcessLoopConnect(t *testing.T) { + newestOffset := int64(5) + lastCutBlockNumber := uint64(2) + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockBrokerConfigCopy := *mockBrokerConfig + mockBrokerConfigCopy.ChannelBufferSize = 0 + + mockParentConsumer := mocks.NewConsumer(t, &mockBrokerConfigCopy) + mpc := 
mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{} + + var counts []uint64 + done := make(chan struct{}) + go func() { + counts, err = processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + done <- struct{}{} + }() + + // This is the wrappedMessage that the for loop will process + mpc.YieldMessage(newMockConsumerMessage(newConnectMessage())) + + logger.Debug("Closing exitChan to exit the infinite for loop") + close(exitChan) // Identical to chain.Halt() + logger.Debug("exitChan closed") + <-done + + assert.NoError(t, err, "Expected the processMessagesToBlock call to return without errors") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessConnectPass], "Expected 1 CONNECT message processed") +} + +func TestProcessLoopRegularError(t *testing.T) { + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockBrokerConfigCopy := *mockBrokerConfig + mockBrokerConfigCopy.ChannelBufferSize = 0 + + mockParentConsumer := mocks.NewConsumer(t, &mockBrokerConfigCopy) + mpc := mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + var counts []uint64 + done := make(chan struct{}) + go func() { + counts, err = processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + done <- struct{}{} + }() + + // This is the wrappedMessage that the for loop will process + mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(tamperBytes(utils.MarshalOrPanic(newMockEnvelope("fooMessage")))))) + + logger.Debug("Closing exitChan to exit the infinite for loop") + close(exitChan) // Identical to chain.Halt() + logger.Debug("exitChan closed") + <-done + + assert.NoError(t, err, 
"Expected the processMessagesToBlock call to return without errors") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexPprocessRegularError], "Expected 1 damaged REGULAR message processed") +} + +func TestProcessLoopRegularQueueEnvelope(t *testing.T) { + batchTimeout, _ := time.ParseDuration("1s") + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockParentConsumer := mocks.NewConsumer(t, nil) + mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset). + YieldMessage(newMockConsumerMessage(newRegularMessage(utils.MarshalOrPanic(newMockEnvelope("fooMessage"))))) // This is the wrappedMessage that the for loop will process + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + BatchTimeoutVal: batchTimeout, + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + go func() { // Note: Unlike the CONNECT test case, the following does NOT introduce a race condition, so we're good + mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return + logger.Debugf("Mock blockcutter's Ordered call has returned") + logger.Debug("Closing exitChan to exit the infinite for loop") // We are guaranteed to hit the exitChan branch after hitting the REGULAR branch at least once + close(exitChan) // Identical to chain.Halt() + logger.Debug("exitChan closed") + }() + + counts, err := processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + assert.NoError(t, err, "Expected the processMessagesToBlock call to return without errors") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed") +} + +func TestProcessLoopRegularCutBlock(t *testing.T) { + batchTimeout, _ := time.ParseDuration("1s") + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + lastCutBlockNumberEnd := lastCutBlockNumber + 1 + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + 
metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockParentConsumer := mocks.NewConsumer(t, nil) + mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset). + YieldMessage(newMockConsumerMessage(newRegularMessage(utils.MarshalOrPanic(newMockEnvelope("fooMessage"))))) // This is the wrappedMessage that the for loop will process + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + BatchTimeoutVal: batchTimeout, + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + mockSupport.BlockCutterVal.CutNext = true + + go func() { // Note: Unlike the CONNECT test case, the following does NOT introduce a race condition, so we're good + mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return + logger.Debugf("Mock blockcutter's Ordered call has returned") + <-mockSupport.Batches // Let the `mockConsenterSupport.WriteBlock` proceed + logger.Debug("Closing exitChan to exit the infinite for loop") // We are guaranteed to hit the exitChan branch after hitting the REGULAR branch at least once + close(exitChan) // Identical to chain.Halt() + logger.Debug("exitChan closed") + }() + + counts, err := processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + assert.NoError(t, err, "Expected the processMessagesToBlock call to return without errors") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed") + assert.Equal(t, lastCutBlockNumberEnd, lastCutBlockNumber, "Expected lastCutBlockNumber to be bumped up by one") +} + +func TestProcessLoopRegularCutTwoBlocks(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode") + } + + batchTimeout, _ := time.ParseDuration("100s") // Something big + newestOffset := int64(0) + lastCutBlockNumber := uint64(0) + lastCutBlockNumberEnd := lastCutBlockNumber + 2 + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + producer, err := 
sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig)
+	assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer")
+
+	mockParentConsumer := mocks.NewConsumer(t, nil)
+	mpc := mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset)
+	mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset)
+	assert.NoError(t, err, "Expected no error when setting up the mock partition consumer")
+
+	mockSupport := &mockmultichain.ConsenterSupport{
+		Batches:        make(chan []*cb.Envelope), // WriteBlock will post here
+		BlockCutterVal: mockblockcutter.NewReceiver(),
+		ChainIDVal:     mockChannel.topic(),
+		HeightVal:      lastCutBlockNumber, // Incremented during the WriteBlock call
+		SharedConfigVal: &mockconfig.Orderer{
+			BatchTimeoutVal: batchTimeout,
+			KafkaBrokersVal: []string{mockBroker.Addr()},
+		},
+	}
+	defer close(mockSupport.BlockCutterVal.Block)
+
+	var block1, block2 *cb.Block
+
+	go func() { // Note: Unlike the CONNECT test case, the following does NOT introduce a race condition, so we're good
+		mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(utils.MarshalOrPanic(newMockEnvelope("fooMessage"))))) // This is the wrappedMessage that the for loop will process
+		mockSupport.BlockCutterVal.Block <- struct{}{}
+		logger.Debugf("Mock blockcutter's Ordered call has returned")
+		mockSupport.BlockCutterVal.IsolatedTx = true
+
+		mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(utils.MarshalOrPanic(newMockEnvelope("fooMessage")))))
+		mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return once again
+		logger.Debugf("Mock blockcutter's Ordered call has returned for the second time")
+
+		select {
+		case <-mockSupport.Batches: // Let the `mockConsenterSupport.WriteBlock` proceed
+			block1 = mockSupport.WriteBlockVal
+		case <-time.After(hitBranch):
+			logger.Fatalf("Did not receive a block from the blockcutter as expected")
+		}
+
+		select {
+		case <-mockSupport.Batches:
+			block2 = mockSupport.WriteBlockVal
+		case <-time.After(hitBranch):
+			logger.Fatalf("Did not receive a block from the blockcutter as expected")
+		}
+
+		logger.Debug("Closing exitChan to exit the infinite for loop") // We are guaranteed to hit the exitChan branch after hitting the REGULAR branch at least once
+		close(exitChan) // Identical to chain.Halt()
+		logger.Debug("exitChan closed")
+	}()
+
+	counts, err := processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan)
+	assert.NoError(t, err, "Expected the processMessagesToBlock call to return without errors")
+	assert.Equal(t, uint64(2), counts[indexRecvPass], "Expected 2 messages received and unmarshaled")
+	assert.Equal(t, uint64(2), counts[indexProcessRegularPass], "Expected 2 REGULAR messages processed")
+	assert.Equal(t, lastCutBlockNumberEnd, lastCutBlockNumber, "Expected lastCutBlockNumber to be bumped up by two")
+	assert.Equal(t, newestOffset+1, extractEncodedOffset(block1.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in first block to be %d", newestOffset+1)
+	assert.Equal(t, newestOffset+2, extractEncodedOffset(block2.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in second block to be %d", newestOffset+2)
+}
+
+func TestProcessLoopRegularAndSendTimeToCutRegular(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping test in short 
mode") + } + + batchTimeout, _ := time.ParseDuration("1ms") + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + lastCutBlockNumberEnd := lastCutBlockNumber + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + successResponse := new(sarama.ProduceResponse) + successResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError) + mockBroker.Returns(successResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockParentConsumer := mocks.NewConsumer(t, nil) + mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset). + YieldMessage(newMockConsumerMessage(newRegularMessage(utils.MarshalOrPanic(newMockEnvelope("fooMessage"))))) // This is the wrappedMessage that the for loop will process + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + BatchTimeoutVal: batchTimeout, + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + go func() { // TODO Hacky, see comments below, revise approach + mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return (in `processRegular`) + logger.Debugf("Mock blockcutter's Ordered call has returned") + time.Sleep(hitBranch) // This introduces a race: we're basically sleeping so as to let select hit the TIMER branch first before the EXITCHAN one + logger.Debug("Closing exitChan to exit the infinite for loop") + close(exitChan) // Identical to chain.Halt() + logger.Debug("exitChan closed") + }() + + counts, err := processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + assert.NoError(t, err, "Expected the processMessagesToBlock call to return without errors") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed") + assert.Equal(t, uint64(1), counts[indexSendTimeToCutPass], "Expected 1 TIMER event processed") + assert.Equal(t, lastCutBlockNumberEnd, lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same") +} + +func TestProcessLoopRegularAndSendTimeToCutError(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + batchTimeout, _ := time.ParseDuration("1ms") + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + lastCutBlockNumberEnd := lastCutBlockNumber + 
haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + mockBrokerConfigCopy := *mockBrokerConfig + mockBrokerConfigCopy.Net.ReadTimeout = 5 * time.Millisecond + mockBrokerConfigCopy.Consumer.Retry.Backoff = 5 * time.Millisecond + mockBrokerConfigCopy.Metadata.Retry.Max = 1 + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, &mockBrokerConfigCopy) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + failureResponse := new(sarama.ProduceResponse) + failureResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotEnoughReplicas) + mockBroker.Returns(failureResponse) + + mockParentConsumer := mocks.NewConsumer(t, nil) + mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset). + YieldMessage(newMockConsumerMessage(newRegularMessage(utils.MarshalOrPanic(newMockEnvelope("fooMessage"))))) // This is the wrappedMessage that the for loop will process + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + BatchTimeoutVal: batchTimeout, + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + go func() { // TODO Hacky, see comments below, revise approach + mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return (in `processRegular`) + logger.Debugf("Mock blockcutter's Ordered call has returned") + time.Sleep(hitBranch) // This introduces a race: we're basically sleeping so as to let select hit the TIMER branch first before the EXITCHAN one + logger.Debug("Closing exitChan to exit the infinite for loop") + close(exitChan) // Identical to chain.Halt() + logger.Debug("exitChan closed") + }() + + counts, err := processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + assert.NoError(t, err, "Expected the processMessagesToBlock call to return without errors") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed") + assert.Equal(t, uint64(1), counts[indexSendTimeToCutError], "Expected 1 faulty TIMER event processed") + assert.Equal(t, lastCutBlockNumberEnd, lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same") +} + +func TestProcessLoopTimeToCutFromReceivedMessageRegular(t *testing.T) { + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + lastCutBlockNumberEnd := lastCutBlockNumber + 1 + haltedFlag := false + exitChan 
:= make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockParentConsumer := mocks.NewConsumer(t, nil) + mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset). + YieldMessage(newMockConsumerMessage(newTimeToCutMessage(lastCutBlockNumber + 1))) // This is the wrappedMessage that the for loop will process + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + // We need the mock blockcutter to deliver a non-empty batch + go func() { + mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call below return + }() + mockSupport.BlockCutterVal.Ordered(newMockEnvelope("fooMessage")) + + go func() { // Note: Unlike the CONNECT test case, the following does NOT introduce a race condition, so we're good + <-mockSupport.Batches // Let the `mockConsenterSupport.WriteBlock` proceed + logger.Debug("Closing exitChan to exit the infinite for loop") // We are guaranteed to hit the exitChan branch after hitting the REGULAR branch at least once + close(exitChan) // Identical to chain.Halt() + logger.Debug("exitChan closed") + }() + + counts, err := processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + assert.NoError(t, err, "Expected the processMessagesToBlock call to return without errors") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessTimeToCutPass], "Expected 1 TIMETOCUT message processed") + assert.Equal(t, lastCutBlockNumberEnd, lastCutBlockNumber, "Expected lastCutBlockNumber to be bumped up by one") +} + +func TestProcessLoopTimeToCutFromReceivedMessageZeroBatch(t *testing.T) { + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + lastCutBlockNumberEnd := lastCutBlockNumber + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + 
mockBroker.Returns(metadataResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockParentConsumer := mocks.NewConsumer(t, nil) + mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset). + YieldMessage(newMockConsumerMessage(newTimeToCutMessage(lastCutBlockNumber + 1))) // This is the wrappedMessage that the for loop will process + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + counts, err := processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + assert.Error(t, err, "Expected the processMessagesToBlock call to return an error") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessTimeToCutError], "Expected 1 faulty TIMETOCUT message processed") + assert.Equal(t, lastCutBlockNumberEnd, lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same") +} + +func TestProcessLoopTimeToCutFromReceivedMessageLargerThanExpected(t *testing.T) { + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + lastCutBlockNumberEnd := lastCutBlockNumber + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockParentConsumer := mocks.NewConsumer(t, nil) + mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset). 
+ YieldMessage(newMockConsumerMessage(newTimeToCutMessage(lastCutBlockNumber + 2))) // This is the wrappedMessage that the for loop will process + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + counts, err := processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + assert.Error(t, err, "Expected the processMessagesToBlock call to return an error") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessTimeToCutError], "Expected 1 faulty TIMETOCUT message processed") + assert.Equal(t, lastCutBlockNumberEnd, lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same") +} + +func TestProcessLoopTimeToCutFromReceivedMessageStale(t *testing.T) { + newestOffset := int64(5) + lastCutBlockNumber := uint64(3) + lastCutBlockNumberEnd := lastCutBlockNumber + haltedFlag := false + exitChan := make(chan struct{}) + + mockChannel := newChannel("mockChannelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + mockBrokerConfigCopy := *mockBrokerConfig + mockBrokerConfigCopy.ChannelBufferSize = 0 + + mockParentConsumer := mocks.NewConsumer(t, &mockBrokerConfigCopy) + mpc := mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), newestOffset) + assert.NoError(t, err, "Expected no error when setting up the mock partition consumer") + + mockSupport := &mockmultichain.ConsenterSupport{ + Batches: make(chan []*cb.Envelope), // WriteBlock will post here + BlockCutterVal: mockblockcutter.NewReceiver(), + ChainIDVal: mockChannel.topic(), + HeightVal: lastCutBlockNumber, // Incremented during the WriteBlock call + SharedConfigVal: &mockconfig.Orderer{ + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + defer close(mockSupport.BlockCutterVal.Block) + + var counts []uint64 + done := make(chan struct{}) + go func() { + counts, err = processMessagesToBlock(mockSupport, producer, mockParentConsumer, mockChannelConsumer, mockChannel, &lastCutBlockNumber, &haltedFlag, &exitChan) + done <- struct{}{} + }() + + // This is the wrappedMessage that the for loop will process + mpc.YieldMessage(newMockConsumerMessage(newTimeToCutMessage(lastCutBlockNumber))) + + 
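	// (Illustrative note, inferred from the assertions below rather than stated elsewhere in this
	// patch: a time-to-cut that references a block number we have already cut is treated as stale,
	// so processMessagesToBlock is expected to register it as a processed TIMETOCUT message while
	// cutting no block, returning no error, and leaving lastCutBlockNumber unchanged.)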
logger.Debug("Closing exitChan to exit the infinite for loop") + close(exitChan) // Identical to chain.Halt() + logger.Debug("exitChan closed") + <-done + + assert.NoError(t, err, "Expected the processMessagesToBlock call to return without errors") + assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled") + assert.Equal(t, uint64(1), counts[indexProcessTimeToCutPass], "Expected 1 TIMETOCUT message processed") + assert.Equal(t, lastCutBlockNumberEnd, lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same") +} + +func TestSendConnectMessage(t *testing.T) { + mockChannel := newChannel("mockChannelFoo", defaultPartition) + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + mockBrokerConfigCopy := *mockBrokerConfig + mockBrokerConfigCopy.Net.ReadTimeout = 5 * time.Millisecond + mockBrokerConfigCopy.Consumer.Retry.Backoff = 5 * time.Millisecond + mockBrokerConfigCopy.Metadata.Retry.Max = 1 + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, &mockBrokerConfigCopy) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + t.Run("Proper", func(t *testing.T) { + successResponse := new(sarama.ProduceResponse) + successResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError) + mockBroker.Returns(successResponse) + + assert.NoError(t, sendConnectMessage(producer, mockChannel), "Expected the sendConnectMessage call to return without errors") + }) + + t.Run("WithError", func(t *testing.T) { + failureResponse := new(sarama.ProduceResponse) + failureResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotEnoughReplicas) + mockBroker.Returns(failureResponse) + err := sendConnectMessage(producer, mockChannel) + assert.Error(t, err, "Expected the sendConnectMessage call to return an error") + }) +} + +func TestSendTimeToCut(t *testing.T) { + mockChannel := newChannel("mockChannelFoo", defaultPartition) + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + mockBrokerConfigCopy := *mockBrokerConfig + mockBrokerConfigCopy.Net.ReadTimeout = 5 * time.Millisecond + mockBrokerConfigCopy.Consumer.Retry.Backoff = 5 * time.Millisecond + mockBrokerConfigCopy.Metadata.Retry.Max = 1 + + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, &mockBrokerConfigCopy) + assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer") + + timeToCutBlockNumber := uint64(3) + var timer <-chan time.Time + + t.Run("Proper", func(t *testing.T) { + successResponse := new(sarama.ProduceResponse) + successResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError) + mockBroker.Returns(successResponse) + + timer = time.After(1 * time.Hour) // Just a very long amount of time + + assert.NoError(t, sendTimeToCut(producer, mockChannel, timeToCutBlockNumber, 
&timer), "Expected the sendTimeToCut call to return without errors") + assert.Nil(t, timer, "Expected the sendTimeToCut call to nil the timer") + }) + + t.Run("WithError", func(t *testing.T) { + failureResponse := new(sarama.ProduceResponse) + failureResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotEnoughReplicas) + mockBroker.Returns(failureResponse) + + timer = time.After(1 * time.Hour) // Just a very long amount of time + + assert.Error(t, sendTimeToCut(producer, mockChannel, timeToCutBlockNumber, &timer), "Expected the sendTimeToCut call to return an error") + assert.Nil(t, timer, "Expected the sendTimeToCut call to nil the timer") + }) +} + +func TestSetupConsumerForChannel(t *testing.T) { + startFrom := int64(3) + oldestOffset := int64(0) + newestOffset := int64(5) + + mockChannel := newChannel("channelFoo", defaultPartition) + message := sarama.StringEncoder("messageFoo") + + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() + mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(mockBroker.Addr(), mockBroker.BrokerID()). + SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()), + "OffsetRequest": sarama.NewMockOffsetResponse(t). + SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset). + SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). + SetMessage(mockChannel.topic(), mockChannel.partition(), startFrom, message), + }) + + t.Run("Proper", func(t *testing.T) { + parentConsumer, channelConsumer, err := setupConsumerForChannel([]string{mockBroker.Addr()}, mockBrokerConfig, mockChannel, startFrom) + assert.NoError(t, err, "Expected the setupConsumerForChannel call to return without errors") + assert.NoError(t, channelConsumer.Close(), "Expected to close the channelConsumer without errors") + assert.NoError(t, parentConsumer.Close(), "Expected to close the parentConsumer without errors") + }) + + t.Run("WithParentConsumerError", func(t *testing.T) { + // Provide an empty brokers list + parentConsumer, channelConsumer, err := setupConsumerForChannel([]string{}, mockBrokerConfig, mockChannel, startFrom) + defer func() { + if err == nil { + channelConsumer.Close() + parentConsumer.Close() + } + }() + assert.Error(t, err, "Expected the setupConsumerForChannel call to return an error") + }) + + t.Run("WithChannelConsumerError", func(t *testing.T) { + // Provide an out-of-range offset + parentConsumer, channelConsumer, err := setupConsumerForChannel([]string{mockBroker.Addr()}, mockBrokerConfig, mockChannel, newestOffset+1) + defer func() { + if err == nil { + channelConsumer.Close() + parentConsumer.Close() + } + }() + assert.Error(t, err, "Expected the setupConsumerForChannel call to return an error") + }) +} + +func TestSetupProducerForChannel(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode") + } + + mockChannel := newChannel("channelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + defer mockBroker.Close() + + t.Run("Proper", func(t *testing.T) { + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) + metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError) + mockBroker.Returns(metadataResponse) + + 
producer, err := setupProducerForChannel([]string{mockBroker.Addr()}, mockBrokerConfig, mockChannel, mockConsenter.retryOptions()) + assert.NoError(t, err, "Expected the setupProducerForChannel call to return without errors") + assert.NoError(t, producer.Close(), "Expected to close the producer without errors") + }) + + t.Run("WithError", func(t *testing.T) { + _, err := setupProducerForChannel([]string{}, mockBrokerConfig, mockChannel, mockConsenter.retryOptions()) + assert.Error(t, err, "Expected the setupProducerForChannel call to return an error") + }) +} diff --git a/orderer/kafka/channel.go b/orderer/kafka/channel.go new file mode 100644 index 00000000000..bf5b5cb1da7 --- /dev/null +++ b/orderer/kafka/channel.go @@ -0,0 +1,58 @@ +/* +Copyright IBM Corp. 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kafka + +import "fmt" + +const defaultPartition = 0 + +// channel identifies the Kafka partition the Kafka-based orderer interacts +// with. +type channel interface { + topic() string + partition() int32 + fmt.Stringer +} + +type channelImpl struct { + tpc string + prt int32 +} + +// Returns a new channel for a given topic name and partition number. +func newChannel(topic string, partition int32) channel { + return &channelImpl{ + tpc: fmt.Sprintf("%s", topic), + prt: partition, + } +} + +// topic returns the Kafka topic this channel belongs to. +func (chn *channelImpl) topic() string { + return chn.tpc +} + +// partition returns the Kafka partition where this channel resides. +func (chn *channelImpl) partition() int32 { + return chn.prt +} + +// String returns a string identifying the Kafka topic/partition corresponding +// to this channel. +func (chn *channelImpl) String() string { + return fmt.Sprintf("%s/%d", chn.tpc, chn.prt) +} diff --git a/orderer/kafka/producer_test.go b/orderer/kafka/channel_test.go similarity index 53% rename from orderer/kafka/producer_test.go rename to orderer/kafka/channel_test.go index 360ed38a14c..b838ee7fa33 100644 --- a/orderer/kafka/producer_test.go +++ b/orderer/kafka/channel_test.go @@ -17,21 +17,20 @@ limitations under the License. 
package kafka import ( + "fmt" "testing" - ab "github.com/hyperledger/fabric/protos/orderer" - "github.com/hyperledger/fabric/protos/utils" + "github.com/stretchr/testify/assert" ) -func TestProducerSend(t *testing.T) { - mp := mockNewProducer(t, cp, testMiddleOffset, make(chan *ab.KafkaMessage)) - defer testClose(t, mp) +func TestChannel(t *testing.T) { + chn := newChannel("channelFoo", defaultPartition) - go func() { - <-mp.(*mockProducerImpl).disk // Retrieve the message that we'll be sending below - }() + expectedTopic := fmt.Sprintf("%s", "channelFoo") + actualTopic := chn.topic() + assert.Equal(t, expectedTopic, actualTopic, "Got the wrong topic, expected %s, got %s instead", expectedTopic, actualTopic) - if err := mp.Send(cp, utils.MarshalOrPanic(newRegularMessage([]byte("foo")))); err != nil { - t.Fatalf("Mock producer was not initialized correctly: %s", err) - } + expectedPartition := int32(defaultPartition) + actualPartition := chn.partition() + assert.Equal(t, expectedPartition, actualPartition, "Got the wrong partition, expected %d, got %d instead", expectedPartition, actualPartition) } diff --git a/orderer/kafka/config.go b/orderer/kafka/config.go new file mode 100644 index 00000000000..3c475d1ac15 --- /dev/null +++ b/orderer/kafka/config.go @@ -0,0 +1,66 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka + +import ( + "crypto/tls" + "crypto/x509" + + "github.com/Shopify/sarama" + localconfig "github.com/hyperledger/fabric/orderer/localconfig" +) + +func newBrokerConfig(tlsConfig localconfig.TLS, retryOptions localconfig.Retry, kafkaVersion sarama.KafkaVersion, chosenStaticPartition int32) *sarama.Config { + brokerConfig := sarama.NewConfig() + + // FIXME https://jira.hyperledger.org/browse/FAB-4136 + // Use retryOptions to populate `Net` + + // Allows us to retrieve errors that occur when consuming a channel, via the + // channel's `listenForErrors` goroutine. + brokerConfig.Consumer.Return.Errors = true + + brokerConfig.Net.TLS.Enable = tlsConfig.Enabled + if brokerConfig.Net.TLS.Enable { + // create public/private key pair structure + keyPair, err := tls.X509KeyPair([]byte(tlsConfig.Certificate), []byte(tlsConfig.PrivateKey)) + if err != nil { + logger.Panic("Unable to decode public/private key pair:", err) + } + // create root CA pool + rootCAs := x509.NewCertPool() + for _, certificate := range tlsConfig.RootCAs { + if !rootCAs.AppendCertsFromPEM([]byte(certificate)) { + logger.Panic("Unable to parse the root certificate authority certificates (Kafka.Tls.RootCAs)") + } + } + brokerConfig.Net.TLS.Config = &tls.Config{ + Certificates: []tls.Certificate{keyPair}, + RootCAs: rootCAs, + MinVersion: tls.VersionTLS12, + MaxVersion: 0, // Latest supported TLS version + } + } + + // Set equivalent of Kafka producer config max.request.bytes to the default + // value of a Kafka broker's socket.request.max.bytes property (100 MiB). + brokerConfig.Producer.MaxMessageBytes = int(sarama.MaxRequestSize) // FIXME https://jira.hyperledger.org/browse/FAB-4083 + // A partitioner is actually not needed the way we do things now, + // but we're adding it now to allow for flexibility in the future. + brokerConfig.Producer.Partitioner = newStaticPartitioner(chosenStaticPartition) + // Set the level of acknowledgement reliability needed from the broker. + // WaitForAll means that the partition leader will wait till all ISRs got + // the message before sending back an ACK to the sender. 
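	// (Illustrative note: WaitForAll is sarama's equivalent of the Kafka producer setting
	// acks=all / acks=-1. As a sketch of intended use, assuming the helpers defined in this
	// patch, a caller would build the config once and hand it to sarama, e.g.:
	//
	//   cfg := newBrokerConfig(tlsConfig, retryOptions, kafkaVersion, defaultPartition)
	//   producer, err := sarama.NewSyncProducer(brokerAddrs, cfg)
	//
	// where brokerAddrs would typically come from the channel's SharedConfig().KafkaBrokers().)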
+ brokerConfig.Producer.RequiredAcks = sarama.WaitForAll + // An esoteric setting required by the sarama library, see: + // https://github.com/Shopify/sarama/issues/816 + brokerConfig.Producer.Return.Successes = true + + brokerConfig.Version = kafkaVersion + + return brokerConfig +} diff --git a/orderer/kafka/config_test.go b/orderer/kafka/config_test.go index 329cfac5798..888ce6f3bd7 100644 --- a/orderer/kafka/config_test.go +++ b/orderer/kafka/config_test.go @@ -1,59 +1,144 @@ /* -Copyright IBM Corp. 2016 All Rights Reserved. +Copyright IBM Corp. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +SPDX-License-Identifier: Apache-2.0 */ package kafka import ( + "crypto/tls" "testing" - "time" "github.com/Shopify/sarama" - "github.com/hyperledger/fabric/orderer/localconfig" - cb "github.com/hyperledger/fabric/protos/common" + localconfig "github.com/hyperledger/fabric/orderer/localconfig" + "github.com/hyperledger/fabric/orderer/mocks/util" + "github.com/stretchr/testify/assert" ) -var ( - testBrokerID = int32(0) - testOldestOffset = int64(100) // The oldest block available on the broker - testNewestOffset = int64(1100) // The offset that will be assigned to the next block - testMiddleOffset = (testOldestOffset + testNewestOffset - 1) / 2 // Just an offset in the middle +func TestBrokerConfig(t *testing.T) { + mockChannel1 := newChannel("channelFoo", defaultPartition) + // Use a partition ID that is not the 'default' (defaultPartition) + var differentPartition int32 = defaultPartition + 1 + mockChannel2 := newChannel("channelFoo", differentPartition) - // Amount of time to wait for block processing when doing time-based tests - // We generally want this value to be as small as possible so as to make tests execute faster - // But this may have to be bumped up in slower machines - testTimePadding = 200 * time.Millisecond -) + mockBroker := sarama.NewMockBroker(t, 0) + defer func() { mockBroker.Close() }() -var testConf = &config.TopLevel{ - Kafka: config.Kafka{ - Retry: config.Retry{ - Period: 3 * time.Second, - Stop: 60 * time.Second, - }, - Verbose: false, - Version: sarama.V0_9_0_1, - }, -} + mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(mockBroker.Addr(), mockBroker.BrokerID()). + SetLeader(mockChannel1.topic(), mockChannel1.partition(), mockBroker.BrokerID()). 
+ SetLeader(mockChannel2.topic(), mockChannel2.partition(), mockBroker.BrokerID()), + "ProduceRequest": sarama.NewMockProduceResponse(t), + }) + + t.Run("New", func(t *testing.T) { + producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + assert.NoError(t, err, "Failed to create producer with given config:", err) + producer.Close() + }) -func testClose(t *testing.T, x Closeable) { - if err := x.Close(); err != nil { - t.Fatal("Cannot close mock resource:", err) + t.Run("Partitioner", func(t *testing.T) { + mockBrokerConfig2 := newBrokerConfig(mockLocalConfig.General.TLS, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version, differentPartition) + producer, _ := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig2) + defer func() { producer.Close() }() + + for i := 0; i < 10; i++ { + assignedPartition, _, err := producer.SendMessage(&sarama.ProducerMessage{Topic: mockChannel2.topic()}) + assert.NoError(t, err, "Failed to send message:", err) + assert.Equal(t, differentPartition, assignedPartition, "Message wasn't posted to the right partition - expected %d, got %v", differentPartition, assignedPartition) + } + }) + + producer, _ := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig) + defer func() { producer.Close() }() + + testCases := []struct { + name string + size int + err error + }{ + {"TypicalDeploy", 4 * 1024, nil}, + {"TooBig", int(sarama.MaxRequestSize + 1), sarama.ErrMessageSizeTooLarge}, } + + for _, tc := range testCases { + t.Run("ProducerMessageMaxBytes"+tc.name, func(t *testing.T) { + _, _, err := producer.SendMessage(&sarama.ProducerMessage{ + Topic: mockChannel1.topic(), + Value: sarama.ByteEncoder(make([]byte, tc.size)), + }) + assert.Equal(t, tc.err, err) + }) + } +} + +func TestBrokerConfigTLSConfigEnabled(t *testing.T) { + publicKey, privateKey, _ := util.GenerateMockPublicPrivateKeyPairPEM(false) + caPublicKey, _, _ := util.GenerateMockPublicPrivateKeyPairPEM(true) + + t.Run("Enabled", func(t *testing.T) { + testBrokerConfig := newBrokerConfig(localconfig.TLS{ + Enabled: true, + PrivateKey: privateKey, + Certificate: publicKey, + RootCAs: []string{caPublicKey}, + }, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version, defaultPartition) + + assert.True(t, testBrokerConfig.Net.TLS.Enable) + assert.NotNil(t, testBrokerConfig.Net.TLS.Config) + assert.Len(t, testBrokerConfig.Net.TLS.Config.Certificates, 1) + assert.Len(t, testBrokerConfig.Net.TLS.Config.RootCAs.Subjects(), 1) + assert.Equal(t, uint16(0), testBrokerConfig.Net.TLS.Config.MaxVersion) + assert.Equal(t, uint16(tls.VersionTLS12), testBrokerConfig.Net.TLS.Config.MinVersion) + }) + + t.Run("Disabled", func(t *testing.T) { + testBrokerConfig := newBrokerConfig(localconfig.TLS{ + Enabled: false, + PrivateKey: privateKey, + Certificate: publicKey, + RootCAs: []string{caPublicKey}, + }, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version, defaultPartition) + + assert.False(t, testBrokerConfig.Net.TLS.Enable) + assert.Zero(t, testBrokerConfig.Net.TLS.Config) + }) } -func newTestEnvelope(content string) *cb.Envelope { - return &cb.Envelope{Payload: []byte(content)} +func TestBrokerConfigTLSConfigBadCert(t *testing.T) { + publicKey, privateKey, _ := util.GenerateMockPublicPrivateKeyPairPEM(false) + caPublicKey, _, _ := util.GenerateMockPublicPrivateKeyPairPEM(true) + + t.Run("BadPrivateKey", func(t *testing.T) { + assert.Panics(t, func() { + newBrokerConfig(localconfig.TLS{ + Enabled: true, + PrivateKey: privateKey, + Certificate: 
"TRASH", + RootCAs: []string{caPublicKey}, + }, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version, defaultPartition) + }) + }) + t.Run("BadPublicKey", func(t *testing.T) { + assert.Panics(t, func() { + newBrokerConfig(localconfig.TLS{ + Enabled: true, + PrivateKey: "TRASH", + Certificate: publicKey, + RootCAs: []string{caPublicKey}, + }, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version, defaultPartition) + }) + }) + t.Run("BadRootCAs", func(t *testing.T) { + assert.Panics(t, func() { + newBrokerConfig(localconfig.TLS{ + Enabled: true, + PrivateKey: privateKey, + Certificate: publicKey, + RootCAs: []string{"TRASH"}, + }, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version, defaultPartition) + }) + }) } diff --git a/orderer/kafka/consenter.go b/orderer/kafka/consenter.go new file mode 100644 index 00000000000..645f157c599 --- /dev/null +++ b/orderer/kafka/consenter.go @@ -0,0 +1,76 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka + +import ( + "github.com/Shopify/sarama" + "github.com/hyperledger/fabric/common/flogging" + localconfig "github.com/hyperledger/fabric/orderer/localconfig" + "github.com/hyperledger/fabric/orderer/multichain" + cb "github.com/hyperledger/fabric/protos/common" + logging "github.com/op/go-logging" +) + +const pkgLogID = "orderer/kafka" + +var logger *logging.Logger + +func init() { + logger = flogging.MustGetLogger(pkgLogID) +} + +// New creates a Kafka-based consenter. Called by orderer's main.go. +func New(tlsConfig localconfig.TLS, retryOptions localconfig.Retry, kafkaVersion sarama.KafkaVersion) multichain.Consenter { + brokerConfig := newBrokerConfig(tlsConfig, retryOptions, kafkaVersion, defaultPartition) + return &consenterImpl{ + brokerConfigVal: brokerConfig, + tlsConfigVal: tlsConfig, + retryOptionsVal: retryOptions, + kafkaVersionVal: kafkaVersion} +} + +// consenterImpl holds the implementation of type that satisfies the +// multichain.Consenter interface --as the HandleChain contract requires-- and +// the commonConsenter one. +type consenterImpl struct { + brokerConfigVal *sarama.Config + tlsConfigVal localconfig.TLS + retryOptionsVal localconfig.Retry + kafkaVersionVal sarama.KafkaVersion +} + +// HandleChain creates/returns a reference to a multichain.Chain object for the +// given set of support resources. Implements the multichain.Consenter +// interface. Called by multichain.newChainSupport(), which is itself called by +// multichain.NewManagerImpl() when ranging over the ledgerFactory's +// existingChains. +func (consenter *consenterImpl) HandleChain(support multichain.ConsenterSupport, metadata *cb.Metadata) (multichain.Chain, error) { + lastOffsetPersisted := getLastOffsetPersisted(metadata.Value, support.ChainID()) + return newChain(consenter, support, lastOffsetPersisted) +} + +// commonConsenter allows us to retrieve the configuration options set on the +// consenter object. These will be common across all chain objects derived by +// this consenter. They are set using using local configuration settings. This +// interface is satisfied by consenterImpl. +type commonConsenter interface { + brokerConfig() *sarama.Config + retryOptions() localconfig.Retry +} + +func (consenter *consenterImpl) brokerConfig() *sarama.Config { + return consenter.brokerConfigVal +} + +func (consenter *consenterImpl) retryOptions() localconfig.Retry { + return consenter.retryOptionsVal +} + +// closeable allows the shut down of the calling resource. 
+type closeable interface { + close() error +} diff --git a/orderer/kafka/consenter_test.go b/orderer/kafka/consenter_test.go new file mode 100644 index 00000000000..ec02f0c8567 --- /dev/null +++ b/orderer/kafka/consenter_test.go @@ -0,0 +1,154 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka + +import ( + "log" + "os" + "testing" + "time" + + "github.com/Shopify/sarama" + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric/common/flogging" + mockconfig "github.com/hyperledger/fabric/common/mocks/config" + localconfig "github.com/hyperledger/fabric/orderer/localconfig" + mockblockcutter "github.com/hyperledger/fabric/orderer/mocks/blockcutter" + mockmultichain "github.com/hyperledger/fabric/orderer/mocks/multichain" + "github.com/hyperledger/fabric/orderer/multichain" + cb "github.com/hyperledger/fabric/protos/common" + ab "github.com/hyperledger/fabric/protos/orderer" + "github.com/hyperledger/fabric/protos/utils" + "github.com/stretchr/testify/assert" +) + +func init() { + mockLocalConfig = newMockLocalConfig(false, 50, 200, false) + mockBrokerConfig = newMockBrokerConfig(mockLocalConfig.General.TLS, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version, defaultPartition) + mockConsenter = newMockConsenter(mockBrokerConfig, mockLocalConfig.General.TLS, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version) + setupTestLogging("ERROR", mockLocalConfig.Kafka.Verbose) +} + +func TestNew(t *testing.T) { + _ = multichain.Consenter(New(mockLocalConfig.General.TLS, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version)) +} + +func TestHandleChain(t *testing.T) { + consenter := multichain.Consenter(New(mockLocalConfig.General.TLS, mockLocalConfig.Kafka.Retry, mockLocalConfig.Kafka.Version)) + + oldestOffset := int64(0) + newestOffset := int64(5) + message := sarama.StringEncoder("messageFoo") + + mockChannel := newChannel("channelFoo", defaultPartition) + + mockBroker := sarama.NewMockBroker(t, 0) + mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(mockBroker.Addr(), mockBroker.BrokerID()). + SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()), + "ProduceRequest": sarama.NewMockProduceResponse(t). + SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError), + "OffsetRequest": sarama.NewMockOffsetResponse(t). + SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset). + SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). 
+ SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message), + }) + + mockSupport := &mockmultichain.ConsenterSupport{ + ChainIDVal: mockChannel.topic(), + SharedConfigVal: &mockconfig.Orderer{ + KafkaBrokersVal: []string{mockBroker.Addr()}, + }, + } + + mockMetadata := &cb.Metadata{Value: utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: newestOffset - 1})} + + _, err := consenter.HandleChain(mockSupport, mockMetadata) + assert.NoError(t, err, "Expected the HandleChain call to return without errors") +} + +// Test helper functions and mock objects defined here + +var mockConsenter commonConsenter +var mockLocalConfig *localconfig.TopLevel +var mockBrokerConfig *sarama.Config + +func extractEncodedOffset(marshalledOrdererMetadata []byte) int64 { + omd := &cb.Metadata{} + _ = proto.Unmarshal(marshalledOrdererMetadata, omd) + kmd := &ab.KafkaMetadata{} + _ = proto.Unmarshal(omd.GetValue(), kmd) + return kmd.LastOffsetPersisted +} + +func newMockBrokerConfig(tlsConfig localconfig.TLS, retryOptions localconfig.Retry, kafkaVersion sarama.KafkaVersion, chosenStaticPartition int32) *sarama.Config { + brokerConfig := newBrokerConfig(tlsConfig, retryOptions, kafkaVersion, chosenStaticPartition) + brokerConfig.ClientID = "test" + brokerConfig.Producer.MaxMessageBytes-- // FIXME https://jira.hyperledger.org/browse/FAB-4083 + return brokerConfig +} + +func newMockConsenter(brokerConfig *sarama.Config, tlsConfig localconfig.TLS, retryOptions localconfig.Retry, kafkaVersion sarama.KafkaVersion) *consenterImpl { + return &consenterImpl{ + brokerConfigVal: brokerConfig, + tlsConfigVal: tlsConfig, + retryOptionsVal: retryOptions, + kafkaVersionVal: kafkaVersion, + } +} + +func newMockConsumerMessage(wrappedMessage *ab.KafkaMessage) *sarama.ConsumerMessage { + return &sarama.ConsumerMessage{ + Value: sarama.ByteEncoder(utils.MarshalOrPanic(wrappedMessage)), + } +} + +func newMockEnvelope(content string) *cb.Envelope { + return &cb.Envelope{Payload: []byte(content)} +} + +func newMockLocalConfig(enableTLS bool, retryPeriod int, retryStop int, verboseLog bool) *localconfig.TopLevel { + return &localconfig.TopLevel{ + General: localconfig.General{ + TLS: localconfig.TLS{ + Enabled: enableTLS, + }, + }, + Kafka: localconfig.Kafka{ + Retry: localconfig.Retry{ + Period: time.Duration(retryPeriod) * time.Millisecond, + Stop: time.Duration(retryStop) * time.Millisecond, + }, + Verbose: verboseLog, + Version: sarama.V0_9_0_1, + }, + } +} + +func setupTestLogging(logLevel string, verbose bool) { + // This call allows us to (a) get the logging backend initialization that + // takes place in the `flogging` package, and (b) adjust the verbosity of + // the logs when running tests on this package. 
+ flogging.SetModuleLevel(pkgLogID, logLevel) + + if verbose { + sarama.Logger = log.New(os.Stdout, "[sarama] ", log.Ldate|log.Lmicroseconds|log.Lshortfile) + } +} + +// Taken from orderer/solo/consensus_test.go +func syncQueueMessage(message *cb.Envelope, chain *chainImpl, mockBlockcutter *mockblockcutter.Receiver) { + chain.Enqueue(message) + mockBlockcutter.Block <- struct{}{} // We'll move past this line (and the function will return) only when the mock blockcutter is about to return +} + +func tamperBytes(original []byte) []byte { + byteCount := len(original) + return original[:byteCount-1] +} diff --git a/orderer/kafka/consumer.go b/orderer/kafka/consumer.go deleted file mode 100644 index 9c970ce3346..00000000000 --- a/orderer/kafka/consumer.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "github.com/Shopify/sarama" - "github.com/hyperledger/fabric/orderer/localconfig" -) - -// Consumer allows the caller to receive a stream of blobs -// from the Kafka cluster for a specific partition. -type Consumer interface { - Recv() <-chan *sarama.ConsumerMessage - Errors() <-chan *sarama.ConsumerError - Closeable -} - -type consumerImpl struct { - parent sarama.Consumer - partition sarama.PartitionConsumer -} - -func newConsumer(brokers []string, kafkaVersion sarama.KafkaVersion, tls config.TLS, cp ChainPartition, offset int64) (Consumer, error) { - parent, err := sarama.NewConsumer(brokers, newBrokerConfig(kafkaVersion, rawPartition, tls)) - if err != nil { - return nil, err - } - partition, err := parent.ConsumePartition(cp.Topic(), cp.Partition(), offset) - if err != nil { - return nil, err - } - c := &consumerImpl{ - parent: parent, - partition: partition, - } - logger.Debugf("[channel: %s] Created new consumer for session (beginning offset: %d)", cp.Topic(), offset) - return c, nil -} - -// Recv returns a channel with blobs received -// from the Kafka cluster for a partition. -func (c *consumerImpl) Recv() <-chan *sarama.ConsumerMessage { - return c.partition.Messages() -} - -// Errors returns a channel with errors occurring during -// the consumption of a partition from the Kafka cluster. -func (c *consumerImpl) Errors() <-chan *sarama.ConsumerError { - return c.partition.Errors() -} - -// Close shuts down the partition consumer. -// Invoked by the session deliverer's Close method, which is itself called -// during the processSeek function, between disabling and enabling the push. -func (c *consumerImpl) Close() error { - if err := c.partition.Close(); err != nil { - return err - } - return c.parent.Close() -} diff --git a/orderer/kafka/consumer_mock_test.go b/orderer/kafka/consumer_mock_test.go deleted file mode 100644 index 3e50d163301..00000000000 --- a/orderer/kafka/consumer_mock_test.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "fmt" - "testing" - "time" - - ab "github.com/hyperledger/fabric/protos/orderer" - "github.com/hyperledger/fabric/protos/utils" - - "github.com/Shopify/sarama" - "github.com/Shopify/sarama/mocks" -) - -type mockConsumerImpl struct { - consumedOffset int64 - chainPartition ChainPartition - - parentConsumer *mocks.Consumer - chainPartitionManager *mocks.PartitionConsumer - chainPartitionConsumer sarama.PartitionConsumer - - disk chan *ab.KafkaMessage - isSetup chan struct{} - targetOffset int64 - t *testing.T -} - -func mockNewConsumer(t *testing.T, cp ChainPartition, offset int64, disk chan *ab.KafkaMessage) (Consumer, error) { - var err error - parentConsumer := mocks.NewConsumer(t, nil) - // NOTE The seek flag seems to be useless here. - // The mock partition will have its highWatermarkOffset - // initialized to 0 no matter what. I've opened up an issue - // in the sarama repo: https://github.com/Shopify/sarama/issues/745 - // Until this is resolved, use the testFillWithBlocks() hack below. - cpManager := parentConsumer.ExpectConsumePartition(cp.Topic(), cp.Partition(), offset) - cpConsumer, err := parentConsumer.ConsumePartition(cp.Topic(), cp.Partition(), offset) - // mockNewConsumer is basically a helper function when testing. - // Any errors it generates internally, should result in panic - // and not get propagated further; checking its errors in the - // calling functions (i.e. the actual tests) increases boilerplate. - if err != nil { - t.Fatal("Cannot create mock partition consumer:", err) - } - mc := &mockConsumerImpl{ - consumedOffset: 0, - targetOffset: offset, - chainPartition: cp, - - parentConsumer: parentConsumer, - chainPartitionManager: cpManager, - chainPartitionConsumer: cpConsumer, - disk: disk, - isSetup: make(chan struct{}), - t: t, - } - // Stop-gap hack until sarama issue #745 is resolved: - if mc.targetOffset >= testOldestOffset && mc.targetOffset <= (testNewestOffset-1) { - mc.testFillWithBlocks(mc.targetOffset - 1) // Prepare the consumer so that the next Recv gives you blob #targetOffset - } else { - err = fmt.Errorf("Out of range offset (seek number) given to consumer: %d", offset) - return mc, err - } - - return mc, err -} - -func (mc *mockConsumerImpl) Recv() <-chan *sarama.ConsumerMessage { - if mc.consumedOffset >= testNewestOffset-1 { - return nil - } - - // This is useful in cases where we want to <-Recv() in a for/select loop in - // a non-blocking manner. Without the timeout, the Go runtime will always - // execute the body of the Recv() method. If there in no outgoing message - // available, it will block while waiting on mc.disk. All the other cases in - // the original for/select loop then won't be evaluated until we unblock on - // <-mc.disk (which may never happen). 
- select { - case <-time.After(testTimePadding / 2): - case outgoingMsg := <-mc.disk: - mc.consumedOffset++ - mc.chainPartitionManager.YieldMessage(testNewConsumerMessage(mc.chainPartition, mc.consumedOffset, outgoingMsg)) - if mc.consumedOffset == mc.targetOffset-1 { - close(mc.isSetup) // Hook for callers - } - return mc.chainPartitionConsumer.Messages() - } - - return nil -} - -func (mc *mockConsumerImpl) Errors() <-chan *sarama.ConsumerError { - return nil -} - -func (mc *mockConsumerImpl) Close() error { - if err := mc.chainPartitionManager.Close(); err != nil { - return err - } - return mc.parentConsumer.Close() -} - -func (mc *mockConsumerImpl) testFillWithBlocks(offset int64) { - for i := int64(1); i <= offset; i++ { - go func() { - mc.disk <- newRegularMessage(utils.MarshalOrPanic(newTestEnvelope(fmt.Sprintf("consumer fill-in %d", i)))) - }() - <-mc.Recv() - } - return -} - -func testNewConsumerMessage(cp ChainPartition, offset int64, kafkaMessage *ab.KafkaMessage) *sarama.ConsumerMessage { - return &sarama.ConsumerMessage{ - Value: sarama.ByteEncoder(utils.MarshalOrPanic(kafkaMessage)), - Topic: cp.Topic(), - Partition: cp.Partition(), - Offset: offset, - } -} diff --git a/orderer/kafka/consumer_test.go b/orderer/kafka/consumer_test.go deleted file mode 100644 index b19ad2d6eae..00000000000 --- a/orderer/kafka/consumer_test.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kafka - -import ( - "testing" - - "github.com/hyperledger/fabric/common/configtx/tool/provisional" - ab "github.com/hyperledger/fabric/protos/orderer" -) - -func TestConsumerInitWrong(t *testing.T) { - cases := []int64{testOldestOffset - 1, testNewestOffset} - - for _, offset := range cases { - mc, err := mockNewConsumer(t, newChainPartition(provisional.TestChainID, rawPartition), offset, make(chan *ab.KafkaMessage)) - testClose(t, mc) - if err == nil { - t.Fatal("Consumer should have failed with out-of-range error") - } - } -} - -func TestConsumerRecv(t *testing.T) { - t.Run("oldest", testConsumerRecvFunc(testOldestOffset, testOldestOffset)) - t.Run("in-between", testConsumerRecvFunc(testMiddleOffset, testMiddleOffset)) - t.Run("newest", testConsumerRecvFunc(testNewestOffset-1, testNewestOffset-1)) -} - -func testConsumerRecvFunc(given, expected int64) func(t *testing.T) { - disk := make(chan *ab.KafkaMessage) - return func(t *testing.T) { - cp := newChainPartition(provisional.TestChainID, rawPartition) - mc, err := mockNewConsumer(t, cp, given, disk) - if err != nil { - testClose(t, mc) - t.Fatal("Consumer should have proceeded normally:", err) - } - <-mc.(*mockConsumerImpl).isSetup - go func() { - disk <- newRegularMessage([]byte("foo")) - }() - msg := <-mc.Recv() - if (msg.Topic != cp.Topic()) || - msg.Partition != cp.Partition() || - msg.Offset != mc.(*mockConsumerImpl).consumedOffset || - msg.Offset != expected { - t.Fatalf("Expected message with offset %d, got %d", expected, msg.Offset) - } - testClose(t, mc) - } -} diff --git a/orderer/kafka/log.go b/orderer/kafka/log.go deleted file mode 100644 index eaf3927a193..00000000000 --- a/orderer/kafka/log.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "github.com/hyperledger/fabric/common/flogging" - "github.com/op/go-logging" -) - -const pkgLogID = "orderer/kafka" - -var logger *logging.Logger - -func init() { - logger = flogging.MustGetLogger(pkgLogID) -} diff --git a/orderer/kafka/log_test.go b/orderer/kafka/log_test.go deleted file mode 100644 index c9a3e1b6608..00000000000 --- a/orderer/kafka/log_test.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kafka - -import ( - "github.com/hyperledger/fabric/common/flogging" -) - -func init() { - // This call allows us to (a) get the logging backend initialization that - // takes place in the `flogging` package, and (b) adjust the verbosity of - // the logs when running tests on this package. - flogging.SetModuleLevel(pkgLogID, "ERROR") -} diff --git a/orderer/kafka/orderer.go b/orderer/kafka/orderer.go deleted file mode 100644 index 16fa0d87bdc..00000000000 --- a/orderer/kafka/orderer.go +++ /dev/null @@ -1,330 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "time" - - "github.com/Shopify/sarama" - "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/orderer/localconfig" - "github.com/hyperledger/fabric/orderer/multichain" - cb "github.com/hyperledger/fabric/protos/common" - ab "github.com/hyperledger/fabric/protos/orderer" - "github.com/hyperledger/fabric/protos/utils" -) - -// New creates a Kafka-backed consenter. Called by orderer's main.go. -func New(kv sarama.KafkaVersion, ro config.Retry, tls config.TLS) multichain.Consenter { - return newConsenter(kv, ro, tls, bfValue, pfValue, cfValue) -} - -// New calls here because we need to pass additional arguments to -// the constructor and New() should only read from the config file. -func newConsenter(kv sarama.KafkaVersion, ro config.Retry, tls config.TLS, bf bfType, pf pfType, cf cfType) multichain.Consenter { - return &consenterImpl{kv, ro, tls, bf, pf, cf} -} - -// bfType defines the signature of the broker constructor. -type bfType func([]string, ChainPartition) (Broker, error) - -// pfType defines the signature of the producer constructor. -type pfType func([]string, sarama.KafkaVersion, config.Retry, config.TLS) Producer - -// cfType defines the signature of the consumer constructor. -type cfType func([]string, sarama.KafkaVersion, config.TLS, ChainPartition, int64) (Consumer, error) - -// bfValue holds the value for the broker constructor that's used in the non-test case. -var bfValue = func(brokers []string, cp ChainPartition) (Broker, error) { - return newBroker(brokers, cp) -} - -// pfValue holds the value for the producer constructor that's used in the non-test case. -var pfValue = func(brokers []string, kafkaVersion sarama.KafkaVersion, retryOptions config.Retry, tls config.TLS) Producer { - return newProducer(brokers, kafkaVersion, retryOptions, tls) -} - -// cfValue holds the value for the consumer constructor that's used in the non-test case. -var cfValue = func(brokers []string, kafkaVersion sarama.KafkaVersion, tls config.TLS, cp ChainPartition, offset int64) (Consumer, error) { - return newConsumer(brokers, kafkaVersion, tls, cp, offset) -} - -// consenterImpl holds the implementation of type that satisfies the -// multichain.Consenter and testableConsenter interfaces. The former -// is needed because that is what the HandleChain contract requires. -// The latter is needed for testing. 
-type consenterImpl struct { - kv sarama.KafkaVersion - ro config.Retry - tls config.TLS - bf bfType - pf pfType - cf cfType -} - -// HandleChain creates/returns a reference to a Chain for the given set of support resources. -// Implements the multichain.Consenter interface. Called by multichain.newChainSupport(), which -// is itself called by multichain.NewManagerImpl() when ranging over the ledgerFactory's existingChains. -func (co *consenterImpl) HandleChain(cs multichain.ConsenterSupport, metadata *cb.Metadata) (multichain.Chain, error) { - return newChain(co, cs, getLastOffsetPersisted(metadata, cs.ChainID())), nil -} - -func getLastOffsetPersisted(metadata *cb.Metadata, chainID string) int64 { - if metadata.Value != nil { - // Extract orderer-related metadata from the tip of the ledger first - kafkaMetadata := &ab.KafkaMetadata{} - if err := proto.Unmarshal(metadata.Value, kafkaMetadata); err != nil { - logger.Panicf("[channel: %s] Ledger may be corrupted:"+ - "cannot unmarshal orderer metadata in most recent block", chainID) - } - return kafkaMetadata.LastOffsetPersisted - } - return (sarama.OffsetOldest - 1) // default -} - -// When testing we need to inject our own broker/producer/consumer. -// Therefore we need to (a) hold a reference to an object that stores -// the broker/producer/consumer constructors, and (b) refer to that -// object via its interface type, so that we can use a different -// implementation when testing. This, in turn, calls for (c) —- the -// definition of an interface (see testableConsenter below) that will -// be satisfied by both the actual and the mock object and will allow -// us to retrieve these constructors. -func newChain(consenter testableConsenter, support multichain.ConsenterSupport, lastOffsetPersisted int64) *chainImpl { - lastCutBlock := support.Height() - 1 - logger.Debugf("[channel: %s] Starting chain with last persisted offset %d and last recorded block %d", - support.ChainID(), lastOffsetPersisted, lastCutBlock) - return &chainImpl{ - consenter: consenter, - support: support, - partition: newChainPartition(support.ChainID(), rawPartition), - batchTimeout: support.SharedConfig().BatchTimeout(), - lastOffsetPersisted: lastOffsetPersisted, - lastCutBlock: lastCutBlock, - producer: consenter.prodFunc()(support.SharedConfig().KafkaBrokers(), consenter.kafkaVersion(), consenter.retryOptions(), consenter.tlsConfig()), - halted: false, // Redundant as the default value for booleans is false but added for readability - exitChan: make(chan struct{}), - haltedChan: make(chan struct{}), - setupChan: make(chan struct{}), - } -} - -// Satisfied by both chainImpl consenterImpl and mockConsenterImpl. -// Defined so as to facilitate testing. 
-type testableConsenter interface {
-    kafkaVersion() sarama.KafkaVersion
-    retryOptions() config.Retry
-    tlsConfig() config.TLS
-    brokFunc() bfType
-    prodFunc() pfType
-    consFunc() cfType
-}
-
-func (co *consenterImpl) kafkaVersion() sarama.KafkaVersion { return co.kv }
-func (co *consenterImpl) retryOptions() config.Retry        { return co.ro }
-func (co *consenterImpl) tlsConfig() config.TLS             { return co.tls }
-func (co *consenterImpl) brokFunc() bfType                  { return co.bf }
-func (co *consenterImpl) prodFunc() pfType                  { return co.pf }
-func (co *consenterImpl) consFunc() cfType                  { return co.cf }
-
-type chainImpl struct {
-    consenter testableConsenter
-    support   multichain.ConsenterSupport
-
-    partition           ChainPartition
-    batchTimeout        time.Duration
-    lastOffsetPersisted int64
-    lastCutBlock        uint64
-
-    producer Producer
-    consumer Consumer
-
-    halted   bool          // For the Enqueue() calls
-    exitChan chan struct{} // For the Chain's Halt() method
-
-    // Hooks for testing
-    haltedChan chan struct{}
-    setupChan  chan struct{}
-}
-
-// Start allocates the necessary resources for staying up to date with this Chain.
-// Implements the multichain.Chain interface. Called by multichain.NewManagerImpl()
-// which is invoked when the ordering process is launched, before the call to NewServer().
-func (ch *chainImpl) Start() {
-    // 1. Post the CONNECT message to prevent panicking that occurs
-    // when seeking on a partition that hasn't been created yet.
-    logger.Debugf("[channel: %s] Posting the CONNECT message...", ch.support.ChainID())
-    if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newConnectMessage())); err != nil {
-        logger.Criticalf("[channel: %s] Cannot post CONNECT message: %s", ch.support.ChainID(), err)
-        close(ch.exitChan)
-        ch.halted = true
-        return
-    }
-    logger.Debugf("[channel: %s] CONNECT message posted successfully", ch.support.ChainID())
-
-    // 2. Set up the listener/consumer for this partition.
-    consumer, err := ch.consenter.consFunc()(ch.support.SharedConfig().KafkaBrokers(), ch.consenter.kafkaVersion(), ch.consenter.tlsConfig(), ch.partition, ch.lastOffsetPersisted+1)
-    if err != nil {
-        logger.Criticalf("[channel: %s] Cannot retrieve requested offset from Kafka cluster: %s", ch.support.ChainID(), err)
-        close(ch.exitChan)
-        ch.halted = true
-        return
-    }
-    ch.consumer = consumer
-    close(ch.setupChan)
-    go ch.listenForErrors()
-
-    // 3. Set up the loop that keeps up to date with the chain.
-    go ch.loop()
-}
-
-func (ch *chainImpl) listenForErrors() {
-    select {
-    case <-ch.exitChan:
-        return
-    case err := <-ch.consumer.Errors():
-        logger.Error(err)
-    }
-}
-
-// Halt frees the resources which were allocated for this Chain.
-// Implements the multichain.Chain interface.
-func (ch *chainImpl) Halt() {
-    select {
-    case <-ch.exitChan:
-        // This construct is useful because it allows Halt() to be
-        // called multiple times w/o panicking. Recall that a receive
-        // from a closed channel returns (the zero value) immediately.
-        logger.Debugf("[channel: %s] Halting of chain requested again", ch.support.ChainID())
-    default:
-        logger.Debugf("[channel: %s] Halting of chain requested", ch.support.ChainID())
-        close(ch.exitChan)
-    }
-}
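Halt() above relies on the close-once idiom: a receive from a closed channel returns immediately, so a repeated call lands in the first select case instead of closing exitChan a second time, which would panic. A minimal sketch of the pattern, detached from the chain specifics:

package main

import "fmt"

type halter struct {
	exitChan chan struct{}
}

// Halt closes exitChan exactly once; subsequent calls observe the closed
// channel and turn into no-ops instead of panicking on a double close.
func (h *halter) Halt() {
	select {
	case <-h.exitChan:
		fmt.Println("halt requested again - nothing to do")
	default:
		fmt.Println("halting")
		close(h.exitChan)
	}
}

func main() {
	h := &halter{exitChan: make(chan struct{})}
	h.Halt() // closes the channel
	h.Halt() // safe repeat call
}

The guard is only race-free for sequential callers: two goroutines invoking Halt() at the same instant could both reach the default case, which is why sync.Once is the usual alternative when concurrent shutdown is expected.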
-
-// Enqueue accepts a message and returns true on acceptance, or false on shutdown.
-// Implements the multichain.Chain interface. Called by the drainQueue goroutine,
-// which is spawned when the broadcast handler's Handle() function is invoked.
-func (ch *chainImpl) Enqueue(env *cb.Envelope) bool {
-    if ch.halted {
-        return false
-    }
-
-    logger.Debugf("[channel: %s] Enqueueing envelope...", ch.support.ChainID())
-    if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newRegularMessage(utils.MarshalOrPanic(env)))); err != nil {
-        logger.Errorf("[channel: %s] cannot enqueue envelope: %s", ch.support.ChainID(), err)
-        return false
-    }
-    logger.Debugf("[channel: %s] Envelope enqueued successfully", ch.support.ChainID())
-
-    return !ch.halted // If ch.halted has been set to true while sending, we should return false
-}
-
-func (ch *chainImpl) loop() {
-    msg := new(ab.KafkaMessage)
-    var timer <-chan time.Time
-    var ttcNumber uint64
-    var encodedLastOffsetPersisted []byte
-
-    defer close(ch.haltedChan)
-    defer ch.producer.Close()
-    defer func() { ch.halted = true }()
-    defer ch.consumer.Close()
-
-    for {
-        select {
-        case in := <-ch.consumer.Recv():
-            if err := proto.Unmarshal(in.Value, msg); err != nil {
-                // This shouldn't happen, it should be filtered at ingress
-                logger.Criticalf("[channel: %s] Unable to unmarshal consumed message: %s", ch.support.ChainID(), err)
-            }
-            logger.Debugf("[channel: %s] Successfully unmarshalled consumed message. Inspecting type...", ch.support.ChainID())
-            switch msg.Type.(type) {
-            case *ab.KafkaMessage_Connect:
-                logger.Debugf("[channel: %s] It's a connect message - ignoring", ch.support.ChainID())
-                continue
-            case *ab.KafkaMessage_TimeToCut:
-                ttcNumber = msg.GetTimeToCut().BlockNumber
-                logger.Debugf("[channel: %s] It's a time-to-cut message for block %d", ch.support.ChainID(), ttcNumber)
-                if ttcNumber == ch.lastCutBlock+1 {
-                    timer = nil
-                    logger.Debugf("[channel: %s] Nil'd the timer", ch.support.ChainID())
-                    batch, committers := ch.support.BlockCutter().Cut()
-                    if len(batch) == 0 {
-                        logger.Warningf("[channel: %s] Got right time-to-cut message (for block %d),"+
-                            " no pending requests though; this might indicate a bug", ch.support.ChainID(), ch.lastCutBlock)
-                        logger.Infof("[channel: %s] Consenter for channel exiting", ch.support.ChainID())
-                        return
-                    }
-                    block := ch.support.CreateNextBlock(batch)
-                    encodedLastOffsetPersisted = utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: in.Offset})
-                    ch.support.WriteBlock(block, committers, encodedLastOffsetPersisted)
-                    ch.lastCutBlock++
-                    logger.Debugf("[channel: %s] Proper time-to-cut received, just cut block %d",
-                        ch.support.ChainID(), ch.lastCutBlock)
-                    continue
-                } else if ttcNumber > ch.lastCutBlock+1 {
-                    logger.Warningf("[channel: %s] Got larger time-to-cut message (%d) than allowed (%d)"+
-                        " - this might indicate a bug", ch.support.ChainID(), ttcNumber, ch.lastCutBlock+1)
-                    logger.Infof("[channel: %s] Consenter for channel exiting", ch.support.ChainID())
-                    return
-                }
-                logger.Debugf("[channel: %s] Ignoring stale time-to-cut message for block %d", ch.support.ChainID(), ch.lastCutBlock)
-            case *ab.KafkaMessage_Regular:
-                env := new(cb.Envelope)
-                if err := proto.Unmarshal(msg.GetRegular().Payload, env); err != nil {
-                    // This shouldn't happen, it should be filtered at ingress
-                    logger.Criticalf("[channel: %s] Unable to unmarshal consumed regular message: %s", ch.support.ChainID(), err)
-                    continue
-                }
-                batches, committers, ok := ch.support.BlockCutter().Ordered(env)
-                logger.Debugf("[channel: %s] Ordering results: items in batch = %v, ok = %v", ch.support.ChainID(), batches, ok)
-                if ok && len(batches) == 0 && timer == nil {
-                    timer = time.After(ch.batchTimeout)
-                    logger.Debugf("[channel: %s] Just began %s batch timer",
ch.support.ChainID(), ch.batchTimeout.String()) - continue - } - // If !ok, batches == nil, so this will be skipped - for i, batch := range batches { - block := ch.support.CreateNextBlock(batch) - encodedLastOffsetPersisted = utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: in.Offset}) - ch.support.WriteBlock(block, committers[i], encodedLastOffsetPersisted) - ch.lastCutBlock++ - logger.Debugf("[channel: %s] Batch filled, just cut block %d", ch.support.ChainID(), ch.lastCutBlock) - } - if len(batches) > 0 { - timer = nil - } - } - case <-timer: - logger.Debugf("[channel: %s] Time-to-cut block %d timer expired", ch.support.ChainID(), ch.lastCutBlock+1) - timer = nil - if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock+1))); err != nil { - logger.Errorf("[channel: %s] Cannot post time-to-cut message: %s", ch.support.ChainID(), err) - // Do not exit - } - case <-ch.exitChan: // When Halt() is called - logger.Infof("[channel: %s] Consenter for channel exiting", ch.support.ChainID()) - return - } - } -} - -// Closeable allows the shut down of the calling resource. -type Closeable interface { - Close() error -} diff --git a/orderer/kafka/orderer_test.go b/orderer/kafka/orderer_test.go deleted file mode 100644 index ebba5bafd2b..00000000000 --- a/orderer/kafka/orderer_test.go +++ /dev/null @@ -1,665 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kafka - -import ( - "fmt" - "sync" - "testing" - "time" - - "github.com/Shopify/sarama" - "github.com/hyperledger/fabric/common/configtx/tool/provisional" - mockconfig "github.com/hyperledger/fabric/common/mocks/config" - "github.com/hyperledger/fabric/orderer/localconfig" - mockblockcutter "github.com/hyperledger/fabric/orderer/mocks/blockcutter" - mockmultichain "github.com/hyperledger/fabric/orderer/mocks/multichain" - "github.com/hyperledger/fabric/orderer/multichain" - cb "github.com/hyperledger/fabric/protos/common" - ab "github.com/hyperledger/fabric/protos/orderer" - "github.com/hyperledger/fabric/protos/utils" -) - -var cp = newChainPartition(provisional.TestChainID, rawPartition) - -type mockConsenterImpl struct { - consenterImpl - prodDisk, consDisk chan *ab.KafkaMessage - t *testing.T -} - -func mockNewConsenter(t *testing.T, kafkaVersion sarama.KafkaVersion, retryOptions config.Retry, nextProducedOffset int64) *mockConsenterImpl { - prodDisk := make(chan *ab.KafkaMessage) - consDisk := make(chan *ab.KafkaMessage) - - mockTLS := config.TLS{Enabled: false} - - mockBfValue := func(brokers []string, cp ChainPartition) (Broker, error) { - return mockNewBroker(t, cp) - } - mockPfValue := func(brokers []string, kafkaVersion sarama.KafkaVersion, retryOptions config.Retry, tls config.TLS) Producer { - // The first Send on this producer will return a blob with offset #nextProducedOffset - return mockNewProducer(t, cp, nextProducedOffset, prodDisk) - } - mockCfValue := func(brokers []string, kafkaVersion sarama.KafkaVersion, tls config.TLS, cp ChainPartition, lastPersistedOffset int64) (Consumer, error) { - if lastPersistedOffset != nextProducedOffset { - panic(fmt.Errorf("Mock objects about to be set up incorrectly (consumer to seek to %d, producer to post %d)", lastPersistedOffset, nextProducedOffset)) - } - return mockNewConsumer(t, cp, lastPersistedOffset, consDisk) - } - - return &mockConsenterImpl{ - consenterImpl: consenterImpl{ - kv: kafkaVersion, - ro: retryOptions, - tls: mockTLS, - bf: mockBfValue, - pf: mockPfValue, - cf: mockCfValue, - }, - prodDisk: prodDisk, - consDisk: consDisk, - t: t, - } -} - -func prepareMockObjectDisks(t *testing.T, co *mockConsenterImpl, ch *chainImpl) { - // Wait until the mock producer is done before messing around with its disk - select { - case <-ch.producer.(*mockProducerImpl).isSetup: - // Dispense with the CONNECT message that is posted with Start() - <-co.prodDisk - case <-time.After(testTimePadding): - t.Fatal("Mock producer not setup in time") - } - // Same for the mock consumer - select { - case <-ch.setupChan: - case <-time.After(testTimePadding): - t.Fatal("Mock consumer not setup in time") - } -} - -func syncQueueMessage(msg *cb.Envelope, chain multichain.Chain, bc *mockblockcutter.Receiver) { - chain.Enqueue(msg) - bc.Block <- struct{}{} -} - -func waitableSyncQueueMessage(env *cb.Envelope, messagesToPickUp int, wg *sync.WaitGroup, - co *mockConsenterImpl, cs *mockmultichain.ConsenterSupport, ch *chainImpl) { - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < messagesToPickUp; i++ { - // On the first iteration of this loop, the message that will be picked up - // is the one posted via the syncQueueMessage/Enqueue call below - msg := <-co.prodDisk - // Place it to the right location so that the mockConsumer can read it - co.consDisk <- msg - } - }() - - syncQueueMessage(env, ch, cs.BlockCutterVal) - // The message has already been moved to the consumer's disk, - // otherwise syncQueueMessage wouldn't return, 
so the Wait() - // here is unnecessary but let's be paranoid. - wg.Wait() -} - -func TestKafkaConsenterEmptyBatch(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - cs := &mockmultichain.ConsenterSupport{ - Batches: make(chan []*cb.Envelope), - BlockCutterVal: mockblockcutter.NewReceiver(), - ChainIDVal: provisional.TestChainID, - SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: testTimePadding}, - } - defer close(cs.BlockCutterVal.Block) - - lastPersistedOffset := testOldestOffset - 1 - nextProducedOffset := lastPersistedOffset + 1 - co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset) - ch := newChain(co, cs, lastPersistedOffset) - - go ch.Start() - defer ch.Halt() - - prepareMockObjectDisks(t, co, ch) - - waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch) - - // Stop the loop - ch.Halt() - - select { - case <-cs.Batches: - t.Fatal("Expected no invocations of Append") - case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great) - } -} - -func TestKafkaConsenterBatchTimer(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - - batchTimeout, _ := time.ParseDuration("1ms") - cs := &mockmultichain.ConsenterSupport{ - Batches: make(chan []*cb.Envelope), - BlockCutterVal: mockblockcutter.NewReceiver(), - ChainIDVal: provisional.TestChainID, - SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout}, - } - defer close(cs.BlockCutterVal.Block) - - lastPersistedOffset := testOldestOffset - 1 - nextProducedOffset := lastPersistedOffset + 1 - co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset) - ch := newChain(co, cs, lastPersistedOffset) - - go ch.Start() - defer ch.Halt() - - prepareMockObjectDisks(t, co, ch) - - // The second message that will be picked up is the time-to-cut message - // that will be posted when the short timer expires - waitableSyncQueueMessage(newTestEnvelope("one"), 2, &wg, co, cs, ch) - - select { - case <-cs.Batches: // This is the success path - case <-time.After(testTimePadding): - t.Fatal("Expected block to be cut because batch timer expired") - } - - // As above - waitableSyncQueueMessage(newTestEnvelope("two"), 2, &wg, co, cs, ch) - - select { - case <-cs.Batches: // This is the success path - case <-time.After(testTimePadding): - t.Fatal("Expected second block to be cut, batch timer not reset") - } - - // Stop the loop - ch.Halt() - - select { - case <-cs.Batches: - t.Fatal("Expected no invocations of Append") - case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great) - } -} - -func TestKafkaConsenterTimerHaltOnFilledBatch(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - - batchTimeout, _ := time.ParseDuration("1h") - cs := &mockmultichain.ConsenterSupport{ - Batches: make(chan []*cb.Envelope), - BlockCutterVal: mockblockcutter.NewReceiver(), - ChainIDVal: provisional.TestChainID, - SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout}, - } - defer close(cs.BlockCutterVal.Block) - - lastPersistedOffset := testOldestOffset - 1 - nextProducedOffset := lastPersistedOffset + 1 - co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset) - ch := newChain(co, cs, lastPersistedOffset) - - go ch.Start() - defer ch.Halt() - - prepareMockObjectDisks(t, co, ch) - - waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch) - - cs.BlockCutterVal.CutNext = true - - 
waitableSyncQueueMessage(newTestEnvelope("two"), 1, &wg, co, cs, ch) - - select { - case <-cs.Batches: - case <-time.After(testTimePadding): - t.Fatal("Expected block to be cut because batch timer expired") - } - - // Change the batch timeout to be near instant. - // If the timer was not reset, it will still be waiting an hour. - ch.batchTimeout = time.Millisecond - - cs.BlockCutterVal.CutNext = false - - // The second message that will be picked up is the time-to-cut message - // that will be posted when the short timer expires - waitableSyncQueueMessage(newTestEnvelope("three"), 2, &wg, co, cs, ch) - - select { - case <-cs.Batches: - case <-time.After(testTimePadding): - t.Fatalf("Did not cut the second block, indicating that the old timer was still running") - } - - // Stop the loop - ch.Halt() - - select { - case <-cs.Batches: - t.Fatal("Expected no invocations of Append") - case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great) - } -} - -func TestKafkaConsenterConfigStyleMultiBatch(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - - cs := &mockmultichain.ConsenterSupport{ - Batches: make(chan []*cb.Envelope), - BlockCutterVal: mockblockcutter.NewReceiver(), - ChainIDVal: provisional.TestChainID, - SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: testTimePadding}, - } - defer close(cs.BlockCutterVal.Block) - - lastPersistedOffset := testOldestOffset - 1 - nextProducedOffset := lastPersistedOffset + 1 - co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset) - ch := newChain(co, cs, lastPersistedOffset) - - go ch.Start() - defer ch.Halt() - - prepareMockObjectDisks(t, co, ch) - - waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch) - - cs.BlockCutterVal.IsolatedTx = true - - waitableSyncQueueMessage(newTestEnvelope("two"), 1, &wg, co, cs, ch) - - ch.Halt() - - select { - case <-cs.Batches: - case <-time.After(testTimePadding): - t.Fatal("Expected two blocks to be cut but never got the first") - } - - select { - case <-cs.Batches: - case <-time.After(testTimePadding): - t.Fatal("Expected the config type tx to create two blocks, but only got the first") - } - - select { - case <-time.After(testTimePadding): - t.Fatal("Should have exited") - case <-ch.haltedChan: - } -} - -func TestKafkaConsenterTimeToCutForced(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - - batchTimeout, _ := time.ParseDuration("1h") - cs := &mockmultichain.ConsenterSupport{ - Batches: make(chan []*cb.Envelope), - BlockCutterVal: mockblockcutter.NewReceiver(), - ChainIDVal: provisional.TestChainID, - SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout}, - } - defer close(cs.BlockCutterVal.Block) - - lastPersistedOffset := testOldestOffset - 1 - nextProducedOffset := lastPersistedOffset + 1 - co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset) - ch := newChain(co, cs, lastPersistedOffset) - - go ch.Start() - defer ch.Halt() - - prepareMockObjectDisks(t, co, ch) - - waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch) - - cs.BlockCutterVal.CutNext = true - - // This is like the waitableSyncQueueMessage routine with the difference - // that we post a time-to-cut message instead of a test envelope. 
- wg.Add(1) - go func() { - defer wg.Done() - msg := <-co.prodDisk - co.consDisk <- msg - }() - - if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock+1))); err != nil { - t.Fatalf("Couldn't post to %s: %s", ch.partition, err) - } - wg.Wait() - - select { - case <-cs.Batches: - case <-time.After(testTimePadding): - t.Fatal("Expected block to be cut because proper time-to-cut was sent") - } - - // Stop the loop - ch.Halt() - - select { - case <-cs.Batches: - t.Fatal("Expected no invocations of Append") - case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great) - } -} - -func TestKafkaConsenterTimeToCutDuplicate(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - - batchTimeout, _ := time.ParseDuration("1h") - cs := &mockmultichain.ConsenterSupport{ - Batches: make(chan []*cb.Envelope), - BlockCutterVal: mockblockcutter.NewReceiver(), - ChainIDVal: provisional.TestChainID, - SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout}, - } - defer close(cs.BlockCutterVal.Block) - - lastPersistedOffset := testOldestOffset - 1 - nextProducedOffset := lastPersistedOffset + 1 - co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset) - ch := newChain(co, cs, lastPersistedOffset) - - go ch.Start() - defer ch.Halt() - - prepareMockObjectDisks(t, co, ch) - - waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch) - - cs.BlockCutterVal.CutNext = true - - // This is like the waitableSyncQueueMessage routine with the difference - // that we post a time-to-cut message instead of a test envelope. - wg.Add(1) - go func() { - defer wg.Done() - msg := <-co.prodDisk - co.consDisk <- msg - }() - - // Send a proper time-to-cut message - if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock+1))); err != nil { - t.Fatalf("Couldn't post to %s: %s", ch.partition, err) - } - wg.Wait() - - select { - case <-cs.Batches: - case <-time.After(testTimePadding): - t.Fatal("Expected block to be cut because proper time-to-cut was sent") - } - - cs.BlockCutterVal.CutNext = false - - waitableSyncQueueMessage(newTestEnvelope("two"), 1, &wg, co, cs, ch) - - cs.BlockCutterVal.CutNext = true - // ATTN: We set `cs.BlockCutterVal.CutNext` to true on purpose - // If the logic works right, the orderer should discard the - // duplicate TTC message below and a call to the block cutter - // will only happen when the long, hour-long timer expires - - // As above - wg.Add(1) - go func() { - defer wg.Done() - msg := <-co.prodDisk - co.consDisk <- msg - }() - - // Send a duplicate time-to-cut message - if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock))); err != nil { - t.Fatalf("Couldn't post to %s: %s", ch.partition, err) - } - wg.Wait() - - select { - case <-cs.Batches: - t.Fatal("Should have discarded duplicate time-to-cut") - case <-time.After(testTimePadding): - // This is the success path - } - - // Stop the loop - ch.Halt() - - select { - case <-cs.Batches: - t.Fatal("Expected no invocations of Append") - case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great) - } -} - -func TestKafkaConsenterTimeToCutStale(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - - batchTimeout, _ := time.ParseDuration("1h") - cs := &mockmultichain.ConsenterSupport{ - Batches: make(chan []*cb.Envelope), - BlockCutterVal: 
mockblockcutter.NewReceiver(),
-        ChainIDVal:      provisional.TestChainID,
-        SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout},
-    }
-    defer close(cs.BlockCutterVal.Block)
-
-    lastPersistedOffset := testOldestOffset - 1
-    nextProducedOffset := lastPersistedOffset + 1
-    co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
-    ch := newChain(co, cs, lastPersistedOffset)
-
-    go ch.Start()
-    defer ch.Halt()
-
-    prepareMockObjectDisks(t, co, ch)
-
-    waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)
-
-    cs.BlockCutterVal.CutNext = true
-
-    // This is like the waitableSyncQueueMessage routine with the difference
-    // that we post a time-to-cut message instead of a test envelope.
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
-        msg := <-co.prodDisk
-        co.consDisk <- msg
-    }()
-
-    // Send a stale time-to-cut message
-    if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock))); err != nil {
-        t.Fatalf("Couldn't post to %s: %s", ch.partition, err)
-    }
-    wg.Wait()
-
-    select {
-    case <-cs.Batches:
-        t.Fatal("Should have ignored stale time-to-cut")
-    case <-time.After(testTimePadding):
-        // This is the success path
-    }
-
-    // Stop the loop
-    ch.Halt()
-
-    select {
-    case <-cs.Batches:
-        t.Fatal("Expected no invocations of Append")
-    case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
-    }
-}
-
-func TestKafkaConsenterTimeToCutLarger(t *testing.T) {
-    var wg sync.WaitGroup
-    defer wg.Wait()
-
-    batchTimeout, _ := time.ParseDuration("1h")
-    cs := &mockmultichain.ConsenterSupport{
-        Batches:         make(chan []*cb.Envelope),
-        BlockCutterVal:  mockblockcutter.NewReceiver(),
-        ChainIDVal:      provisional.TestChainID,
-        SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout},
-    }
-    defer close(cs.BlockCutterVal.Block)
-
-    lastPersistedOffset := testOldestOffset - 1
-    nextProducedOffset := lastPersistedOffset + 1
-    co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
-    ch := newChain(co, cs, lastPersistedOffset)
-
-    go ch.Start()
-    defer ch.Halt()
-
-    prepareMockObjectDisks(t, co, ch)
-
-    waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)
-
-    cs.BlockCutterVal.CutNext = true
-
-    // This is like the waitableSyncQueueMessage routine with the difference
-    // that we post a time-to-cut message instead of a test envelope.
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
-        msg := <-co.prodDisk
-        co.consDisk <- msg
-    }()
-
-    // Send a time-to-cut message for a block number larger than the expected next one
-    if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock+2))); err != nil {
-        t.Fatalf("Couldn't post to %s: %s", ch.partition, err)
-    }
-    wg.Wait()
-
-    select {
-    case <-cs.Batches:
-        t.Fatal("Should have ignored larger time-to-cut than expected")
-    case <-time.After(testTimePadding):
-        // This is the success path
-    }
-
-    // Loop is already stopped, but this is a good test to see
-    // if a second invocation of Halt() panics. (It shouldn't.)
- defer func() { - if r := recover(); r != nil { - t.Fatal("Expected duplicate call to Halt to succeed") - } - }() - - ch.Halt() - - select { - case <-cs.Batches: - t.Fatal("Expected no invocations of Append") - case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great) - } -} - -func TestGetLastOffsetPersistedEmpty(t *testing.T) { - expected := sarama.OffsetOldest - 1 - actual := getLastOffsetPersisted(&cb.Metadata{}, "") - if actual != expected { - t.Fatalf("Expected last offset %d, got %d", expected, actual) - } -} - -func TestGetLastOffsetPersistedRight(t *testing.T) { - expected := int64(100) - actual := getLastOffsetPersisted(&cb.Metadata{Value: utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: expected})}, "") - if actual != expected { - t.Fatalf("Expected last offset %d, got %d", expected, actual) - } -} - -func TestKafkaConsenterRestart(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - - batchTimeout, _ := time.ParseDuration("1ms") - cs := &mockmultichain.ConsenterSupport{ - Batches: make(chan []*cb.Envelope), - BlockCutterVal: mockblockcutter.NewReceiver(), - ChainIDVal: provisional.TestChainID, - SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout}, - } - defer close(cs.BlockCutterVal.Block) - - lastPersistedOffset := testOldestOffset - 1 - nextProducedOffset := lastPersistedOffset + 1 - co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset) - ch := newChain(co, cs, lastPersistedOffset) - - go ch.Start() - defer ch.Halt() - - prepareMockObjectDisks(t, co, ch) - - // The second message that will be picked up is the time-to-cut message - // that will be posted when the short timer expires - waitableSyncQueueMessage(newTestEnvelope("one"), 2, &wg, co, cs, ch) - - select { - case <-cs.Batches: // This is the success path - case <-time.After(testTimePadding): - t.Fatal("Expected block to be cut because batch timer expired") - } - - // Stop the loop - ch.Halt() - - select { - case <-cs.Batches: - t.Fatal("Expected no invocations of Append") - case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great) - } - - lastBlock := cs.WriteBlockVal - metadata, err := utils.GetMetadataFromBlock(lastBlock, cb.BlockMetadataIndex_ORDERER) - if err != nil { - logger.Fatalf("Error extracting orderer metadata for chain %x: %s", cs.ChainIDVal, err) - } - - lastPersistedOffset = getLastOffsetPersisted(metadata, ch.support.ChainID()) - nextProducedOffset = lastPersistedOffset + 1 - - co = mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset) - ch = newChain(co, cs, lastPersistedOffset) - go ch.Start() - prepareMockObjectDisks(t, co, ch) - - actual := ch.producer.(*mockProducerImpl).producedOffset - if actual != nextProducedOffset { - t.Fatalf("Restarted orderer post-connect should have been at offset %d, got %d instead", nextProducedOffset, actual) - } -} diff --git a/orderer/kafka/producer.go b/orderer/kafka/producer.go deleted file mode 100644 index 1f7fe848fc2..00000000000 --- a/orderer/kafka/producer.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "time" - - "github.com/Shopify/sarama" - "github.com/hyperledger/fabric/orderer/localconfig" -) - -// Producer allows the caller to post blobs to a chain partition on the Kafka cluster. -type Producer interface { - Send(cp ChainPartition, payload []byte) error - Closeable -} - -type producerImpl struct { - producer sarama.SyncProducer -} - -func newProducer(brokers []string, kafkaVersion sarama.KafkaVersion, retryOptions config.Retry, tls config.TLS) Producer { - var p sarama.SyncProducer - var err error - brokerConfig := newBrokerConfig(kafkaVersion, rawPartition, tls) - - repeatTick := time.NewTicker(retryOptions.Period) - panicTick := time.NewTicker(retryOptions.Stop) - defer repeatTick.Stop() - defer panicTick.Stop() - -loop: - for { - select { - case <-panicTick.C: - logger.Panicf("Failed to create Kafka producer: %v", err) - case <-repeatTick.C: - logger.Debug("Connecting to Kafka cluster:", brokers) - p, err = sarama.NewSyncProducer(brokers, brokerConfig) - if err == nil { - break loop - } - } - } - - logger.Debug("Connected to the Kafka cluster") - return &producerImpl{producer: p} -} - -// Close shuts down the Producer component of the orderer. -func (p *producerImpl) Close() error { - return p.producer.Close() -} - -// Send posts a blob to a chain partition on the Kafka cluster. -func (p *producerImpl) Send(cp ChainPartition, payload []byte) error { - prt, ofs, err := p.producer.SendMessage(newProducerMessage(cp, payload)) - if prt != cp.Partition() { - // If this happens, something's up with the partitioner - logger.Warningf("[channel: %s] Blob destined for partition %d, but posted to %d instead", cp.Topic(), cp.Partition(), prt) - } - if err == nil { - logger.Debugf("[channel %s] Posted blob to the Kafka cluster (offset number: %d)", cp.Topic(), ofs) - } else { - logger.Infof("[channel %s] Failed to post blob to the Kafka cluster: %s", cp.Topic(), err) - } - return err -} diff --git a/orderer/kafka/producer_mock_test.go b/orderer/kafka/producer_mock_test.go deleted file mode 100644 index 7373b7b69cf..00000000000 --- a/orderer/kafka/producer_mock_test.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kafka - -import ( - "fmt" - "testing" - - "github.com/Shopify/sarama/mocks" - "github.com/golang/protobuf/proto" - ab "github.com/hyperledger/fabric/protos/orderer" - "github.com/hyperledger/fabric/protos/utils" -) - -type mockProducerImpl struct { - producer *mocks.SyncProducer - checker mocks.ValueChecker - - // This simulates the broker's "disk" where the producer's - // blobs for a certain chain partition eventually end up. - disk chan *ab.KafkaMessage - producedOffset int64 - isSetup chan struct{} - t *testing.T -} - -// Create a new producer whose next "Send" on ChainPartition gives you blob #offset. -func mockNewProducer(t *testing.T, cp ChainPartition, offset int64, disk chan *ab.KafkaMessage) Producer { - mp := &mockProducerImpl{ - producer: mocks.NewSyncProducer(t, nil), - checker: nil, - disk: disk, - producedOffset: 0, - isSetup: make(chan struct{}), - t: t, - } - mp.init(cp, offset) - - if mp.producedOffset == offset-1 { - close(mp.isSetup) - } else { - mp.t.Fatal("Mock producer failed to initialize itself properly") - } - - return mp -} - -func (mp *mockProducerImpl) Send(cp ChainPartition, payload []byte) error { - mp.producer.ExpectSendMessageWithCheckerFunctionAndSucceed(mp.checker) - mp.producedOffset++ // This is the offset that will be assigned to the sent message - if _, ofs, err := mp.producer.SendMessage(newProducerMessage(cp, payload)); err != nil || ofs != mp.producedOffset { - // We do NOT check the assigned partition because the mock - // producer always posts to partition 0 no matter what. - // This is a deficiency of the Kafka library that we use. - mp.t.Fatal("Mock producer not functioning as expected") - } - msg := new(ab.KafkaMessage) - if err := proto.Unmarshal(payload, msg); err != nil { - mp.t.Fatalf("Failed to unmarshal message that reached producer's disk: %s", err) - } - mp.disk <- msg // Reaches the cluster's disk for that chain partition - return nil -} - -func (mp *mockProducerImpl) Close() error { - return mp.producer.Close() -} - -// Initializes the mock producer by setting up the offsets. -func (mp *mockProducerImpl) init(cp ChainPartition, offset int64) { - if offset >= testOldestOffset && offset <= (testNewestOffset-1) { - // Prepare the producer so that the next Send - // on that chain partition gives you blob #offset. - mp.testFillWithBlocks(cp, offset-1) - } else { - logger.Panicf("Out of range offset (seek number) given to producer: %d", offset) - } -} - -func (mp *mockProducerImpl) testFillWithBlocks(cp ChainPartition, offset int64) { - dieChan := make(chan struct{}) - deadChan := make(chan struct{}) - - go func() { // This goroutine is meant to read only the "fill-in" blocks - for { - select { - case <-mp.disk: - case <-dieChan: - close(deadChan) - return - } - } - }() - - for i := int64(1); i <= offset; i++ { - mp.Send(cp, utils.MarshalOrPanic(newRegularMessage(utils.MarshalOrPanic(newTestEnvelope(fmt.Sprintf("producer fill-in %d", i)))))) - } - - close(dieChan) - <-deadChan - - return -} diff --git a/orderer/kafka/util.go b/orderer/kafka/util.go deleted file mode 100644 index b4fb2471b74..00000000000 --- a/orderer/kafka/util.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "crypto/tls" - "crypto/x509" - "strconv" - - "github.com/Shopify/sarama" - "github.com/hyperledger/fabric/orderer/localconfig" - ab "github.com/hyperledger/fabric/protos/orderer" -) - -func newBrokerConfig(kafkaVersion sarama.KafkaVersion, chosenStaticPartition int32, tlsConfig config.TLS) *sarama.Config { - brokerConfig := sarama.NewConfig() - - brokerConfig.Consumer.Return.Errors = true - - brokerConfig.Net.TLS.Enable = tlsConfig.Enabled - if brokerConfig.Net.TLS.Enable { - // create public/private key pair structure - keyPair, err := tls.X509KeyPair([]byte(tlsConfig.Certificate), []byte(tlsConfig.PrivateKey)) - if err != nil { - logger.Panicf("Unable to decode public/private key pair: %s", err) - } - // create root CA pool - rootCAs := x509.NewCertPool() - for _, certificate := range tlsConfig.RootCAs { - if !rootCAs.AppendCertsFromPEM([]byte(certificate)) { - logger.Panic("Unable to parse the root certificate authority certificates (Kafka.Tls.RootCAs)") - } - } - brokerConfig.Net.TLS.Config = &tls.Config{ - Certificates: []tls.Certificate{keyPair}, - RootCAs: rootCAs, - MinVersion: tls.VersionTLS12, - MaxVersion: 0, // Latest supported TLS version - } - } - - // Set equivalent of Kafka producer config max.request.bytes to the default - // value of a Kafka broker's socket.request.max.bytes property (100 MiB). - brokerConfig.Producer.MaxMessageBytes = int(sarama.MaxRequestSize) - // A partitioner is actually not needed the way we do things now, - // but we're adding it now to allow for flexibility in the future. - brokerConfig.Producer.Partitioner = newStaticPartitioner(chosenStaticPartition) - // Set the level of acknowledgement reliability needed from the broker. - // WaitForAll means that the partition leader will wait till all ISRs - // got the message before sending back an ACK to the sender. - brokerConfig.Producer.RequiredAcks = sarama.WaitForAll - // An esoteric setting required by the sarama library, see: - // https://github.com/Shopify/sarama/issues/816 - brokerConfig.Producer.Return.Successes = true - - brokerConfig.Version = kafkaVersion - - return brokerConfig -} - -func newConnectMessage() *ab.KafkaMessage { - return &ab.KafkaMessage{ - Type: &ab.KafkaMessage_Connect{ - Connect: &ab.KafkaMessageConnect{ - Payload: nil, - }, - }, - } -} - -func newRegularMessage(payload []byte) *ab.KafkaMessage { - return &ab.KafkaMessage{ - Type: &ab.KafkaMessage_Regular{ - Regular: &ab.KafkaMessageRegular{ - Payload: payload, - }, - }, - } -} - -func newTimeToCutMessage(blockNumber uint64) *ab.KafkaMessage { - return &ab.KafkaMessage{ - Type: &ab.KafkaMessage_TimeToCut{ - TimeToCut: &ab.KafkaMessageTimeToCut{ - BlockNumber: blockNumber, - }, - }, - } -} - -func newProducerMessage(cp ChainPartition, payload []byte) *sarama.ProducerMessage { - return &sarama.ProducerMessage{ - Topic: cp.Topic(), - Key: sarama.StringEncoder(strconv.Itoa(int(cp.Partition()))), // TODO Consider writing an IntEncoder? 
- Value: sarama.ByteEncoder(payload), - } -} - -func newOffsetReq(cp ChainPartition, offset int64) *sarama.OffsetRequest { - req := &sarama.OffsetRequest{} - // If offset (seek) == -1, ask for the offset assigned to next new message. - // If offset (seek) == -2, ask for the earliest available offset. - // The last parameter in the AddBlock call is needed for God-knows-why reasons. - // From the Kafka folks themselves: "We agree that this API is slightly funky." - // https://mail-archives.apache.org/mod_mbox/kafka-users/201411.mbox/%3Cc159383825e04129b77253ffd6c448aa@BY2PR02MB505.namprd02.prod.outlook.com%3E - req.AddBlock(cp.Topic(), cp.Partition(), offset, 1) - return req -} diff --git a/orderer/kafka/util_test.go b/orderer/kafka/util_test.go deleted file mode 100644 index d8ecc1e4f38..00000000000 --- a/orderer/kafka/util_test.go +++ /dev/null @@ -1,183 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka - -import ( - "crypto/tls" - "testing" - - "github.com/Shopify/sarama" - "github.com/hyperledger/fabric/common/configtx/tool/provisional" - "github.com/hyperledger/fabric/orderer/localconfig" - "github.com/hyperledger/fabric/orderer/mocks/util" - "github.com/stretchr/testify/assert" -) - -func TestProducerConfigMessageMaxBytes(t *testing.T) { - broker := sarama.NewMockBroker(t, 1) - defer func() { - broker.Close() - }() - broker.SetHandlerByMap(map[string]sarama.MockResponse{ - "MetadataRequest": sarama.NewMockMetadataResponse(t). - SetBroker(broker.Addr(), broker.BrokerID()). - SetLeader(cp.Topic(), cp.Partition(), broker.BrokerID()), - "ProduceRequest": sarama.NewMockProduceResponse(t), - }) - - mockTLS := config.TLS{Enabled: false} - config := newBrokerConfig(testConf.Kafka.Version, rawPartition, mockTLS) - producer, err := sarama.NewSyncProducer([]string{broker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - defer func() { - producer.Close() - }() - - testCases := []struct { - name string - size int - err error - }{ - {"TypicalDeploy", 8 * 1024 * 1024, nil}, - {"TooBig", 100*1024*1024 + 1, sarama.ErrMessageSizeTooLarge}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - _, _, err = producer.SendMessage(&sarama.ProducerMessage{ - Topic: cp.Topic(), - Value: sarama.ByteEncoder(make([]byte, tc.size)), - }) - if err != tc.err { - t.Fatal(err) - } - }) - - } -} - -func TestNewBrokerConfig(t *testing.T) { - // Use a partition ID that is not the 'default' (rawPartition) - var differentPartition int32 = 2 - cp = newChainPartition(provisional.TestChainID, differentPartition) - - // Setup a mock broker that reports that it has 3 partitions for the topic - broker := sarama.NewMockBroker(t, 1) - defer func() { - broker.Close() - }() - broker.SetHandlerByMap(map[string]sarama.MockResponse{ - "MetadataRequest": sarama.NewMockMetadataResponse(t). - SetBroker(broker.Addr(), broker.BrokerID()). - SetLeader(cp.Topic(), 0, broker.BrokerID()). - SetLeader(cp.Topic(), 1, broker.BrokerID()). 
- SetLeader(cp.Topic(), 2, broker.BrokerID()), - "ProduceRequest": sarama.NewMockProduceResponse(t), - }) - - config := newBrokerConfig(testConf.Kafka.Version, differentPartition, config.TLS{Enabled: false}) - producer, err := sarama.NewSyncProducer([]string{broker.Addr()}, config) - if err != nil { - t.Fatal("Failed to create producer:", err) - } - defer func() { - producer.Close() - }() - - for i := 0; i < 10; i++ { - assignedPartition, _, err := producer.SendMessage(&sarama.ProducerMessage{Topic: cp.Topic()}) - if err != nil { - t.Fatal("Failed to send message:", err) - } - if assignedPartition != differentPartition { - t.Fatalf("Message wasn't posted to the right partition - expected: %d, got %v", differentPartition, assignedPartition) - } - } -} - -func TestTLSConfigEnabled(t *testing.T) { - publicKey, privateKey, _ := util.GenerateMockPublicPrivateKeyPairPEM(false) - caPublicKey, _, _ := util.GenerateMockPublicPrivateKeyPairPEM(true) - - config := newBrokerConfig(testConf.Kafka.Version, 0, config.TLS{ - Enabled: true, - PrivateKey: privateKey, - Certificate: publicKey, - RootCAs: []string{caPublicKey}, - }) - - assert.True(t, config.Net.TLS.Enable) - assert.NotNil(t, config.Net.TLS.Config) - assert.Len(t, config.Net.TLS.Config.Certificates, 1) - assert.Len(t, config.Net.TLS.Config.RootCAs.Subjects(), 1) - assert.Equal(t, uint16(0), config.Net.TLS.Config.MaxVersion) - assert.Equal(t, uint16(tls.VersionTLS12), config.Net.TLS.Config.MinVersion) -} - -func TestTLSConfigDisabled(t *testing.T) { - publicKey, privateKey, _ := util.GenerateMockPublicPrivateKeyPairPEM(false) - caPublicKey, _, _ := util.GenerateMockPublicPrivateKeyPairPEM(true) - - config := newBrokerConfig(testConf.Kafka.Version, 0, config.TLS{ - Enabled: false, - PrivateKey: privateKey, - Certificate: publicKey, - RootCAs: []string{caPublicKey}, - }) - - assert.False(t, config.Net.TLS.Enable) - assert.Zero(t, config.Net.TLS.Config) - -} - -func TestTLSConfigBadCert(t *testing.T) { - publicKey, privateKey, _ := util.GenerateMockPublicPrivateKeyPairPEM(false) - caPublicKey, _, _ := util.GenerateMockPublicPrivateKeyPairPEM(true) - - t.Run("BadPrivateKey", func(t *testing.T) { - assert.Panics(t, func() { - newBrokerConfig(testConf.Kafka.Version, 0, config.TLS{ - Enabled: true, - PrivateKey: privateKey, - Certificate: "TRASH", - RootCAs: []string{caPublicKey}, - }) - }) - }) - t.Run("BadPublicKey", func(t *testing.T) { - assert.Panics(t, func() { - newBrokerConfig(testConf.Kafka.Version, 0, config.TLS{ - Enabled: true, - PrivateKey: "TRASH", - Certificate: publicKey, - RootCAs: []string{caPublicKey}, - }) - }) - }) - t.Run("BadRootCAs", func(t *testing.T) { - assert.Panics(t, func() { - newBrokerConfig(testConf.Kafka.Version, 0, config.TLS{ - Enabled: true, - PrivateKey: privateKey, - Certificate: publicKey, - RootCAs: []string{"TRASH"}, - }) - }) - }) -} diff --git a/orderer/main.go b/orderer/main.go index 65ed9324ad4..ffa3ac48370 100644 --- a/orderer/main.go +++ b/orderer/main.go @@ -66,7 +66,7 @@ func main() { func initializeLoggingLevel(conf *config.TopLevel) { flogging.InitFromSpec(conf.General.LogLevel) if conf.Kafka.Verbose { - sarama.Logger = log.New(os.Stdout, "[sarama] ", log.Lshortfile) + sarama.Logger = log.New(os.Stdout, "[sarama] ", log.Ldate|log.Lmicroseconds|log.Lshortfile) } } @@ -192,7 +192,7 @@ func initializeMultiChainManager(conf *config.TopLevel, signer crypto.LocalSigne consenters := make(map[string]multichain.Consenter) consenters["solo"] = solo.New() - consenters["kafka"] = kafka.New(conf.Kafka.Version, 
conf.Kafka.Retry, conf.Kafka.TLS) + consenters["kafka"] = kafka.New(conf.Kafka.TLS, conf.Kafka.Retry, conf.Kafka.Version) return multichain.NewManagerImpl(lf, consenters, signer) } diff --git a/orderer/mocks/multichain/multichain.go b/orderer/mocks/multichain/multichain.go index 0b51a7504fb..b0bf476c512 100644 --- a/orderer/mocks/multichain/multichain.go +++ b/orderer/mocks/multichain/multichain.go @@ -85,12 +85,12 @@ func (mcs *ConsenterSupport) WriteBlock(block *cb.Block, _committers []filter.Co for i := range block.Data.Data { umtxs[i] = utils.UnmarshalEnvelopeOrPanic(block.Data.Data[i]) } - mcs.Batches <- umtxs mcs.HeightVal++ if encodedMetadataValue != nil { block.Metadata.Metadata[cb.BlockMetadataIndex_ORDERER] = utils.MarshalOrPanic(&cb.Metadata{Value: encodedMetadataValue}) } mcs.WriteBlockVal = block + mcs.Batches <- umtxs return block }
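One detail worth calling out in the mock change above: WriteBlock now assigns mcs.WriteBlockVal before sending on mcs.Batches. Tests such as TestKafkaConsenterRestart use the receive from Batches as their signal that the written block is ready to inspect, and the channel send/receive pair is what provides the happens-before edge. A minimal sketch of why that ordering matters, with illustrative names:

package main

import "fmt"

// mockSupport stands in for the ConsenterSupport mock: val is the last block
// "written", batches is the channel a test blocks on.
type mockSupport struct {
	batches chan int
	val     int
}

// writeBlock records the block first and only then signals the test; doing it
// the other way around would let the receiver run before val is assigned and
// observe a stale value.
func (m *mockSupport) writeBlock(block int) {
	m.val = block      // corresponds to mcs.WriteBlockVal = block
	m.batches <- block // corresponds to mcs.Batches <- umtxs
}

func main() {
	m := &mockSupport{batches: make(chan int)}
	go m.writeBlock(7)
	<-m.batches                     // test side: wait for the write
	fmt.Println("observed:", m.val) // guaranteed to see 7
}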