diff --git a/devenv/Vagrantfile b/devenv/Vagrantfile
index 0f9ac444c42..2fe01eec077 100644
--- a/devenv/Vagrantfile
+++ b/devenv/Vagrantfile
@@ -34,7 +34,6 @@ Vagrant.configure('2') do |config|
   config.vm.box = "hyperledger/fabric-baseimage"
   config.vm.box_version = ENV['USE_LOCAL_BASEIMAGE'] ? "0": baseimage_release # Vagrant does not support versioning local images, the local version is always implicitly version 0
 
-  config.vm.network :forwarded_port, guest: 7050, host: 7050 # Openchain REST services
   config.vm.network :forwarded_port, guest: 7051, host: 7051 # Openchain gRPC services
   config.vm.network :forwarded_port, guest: 7054, host: 7054 # Membership service
   config.vm.network :forwarded_port, guest: 7053, host: 7053 # GRPCCient gRPC services
diff --git a/examples/chaincode/go/utxo/util/utxo_test.go b/examples/chaincode/go/utxo/util/utxo_test.go
index f9769a0ba7c..ec979c3d279 100644
--- a/examples/chaincode/go/utxo/util/utxo_test.go
+++ b/examples/chaincode/go/utxo/util/utxo_test.go
@@ -31,8 +31,7 @@ import (
 // func TestMain(m *testing.M) {
 // 	// viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
-// 	// viper.Set("peer.validator.validity-period.verification", "false")
-
+
 // 	// os.Exit(m.Run())
 // }
diff --git a/peer/core.yaml b/peer/core.yaml
index 7228f882a7b..52d97635a48 100644
--- a/peer/core.yaml
+++ b/peer/core.yaml
@@ -1,37 +1,3 @@
-###############################################################################
-#
-#
-#    CLI section
-#
-###############################################################################
-cli:
-
-    # The address that the cli process will use for callbacks from chaincodes
-    address: 0.0.0.0:7052
-
-
-###############################################################################
-#
-#    REST section
-#
-###############################################################################
-rest:
-
-    # Enable/disable setting for the REST service. It is recommended to disable
-    # REST service on validators in production deployment and use non-validating
-    # nodes to host REST service
-    enabled: false
-
-    # The address that the REST service will listen on for incoming requests.
-    address: 0.0.0.0:7050
-
-    validPatterns:
-
-        # Valid enrollment ID pattern in URLs: At least one character long, and
-        # all characters are A-Z, a-z, 0-9 or _.
-        enrollmentID: '^\w+$'
-
 ###############################################################################
 #
 #    LOGGING section
 #
 ###############################################################################
@@ -63,14 +29,13 @@
 #
 # Developers: Please see fabric/docs/Setup/logging-control.md for more
 # options.
-    peer: warning
-
-    node: info
-    network: warning
-    chaincode: warning
-    version: warning
+    peer:       warning
+    node:       info
+    network:    warning
+    chaincode:  warning
+    version:    warning
     protoutils: debug
-    error: warning
+    error:      warning
 
     format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}'
@@ -84,9 +49,6 @@ peer:
     # The Peer id is used for identifying this Peer instance.
     id: jdoe
 
-    # The privateKey to be used by this peer
-    # privateKey: 794ef087680e2494fa4918fd8fb80fb284b50b57d321a31423fe42b9ccf6216047cea0b66fe8365a8e3f2a8140c6866cc45852e63124668bee1daa9c97da0c2a
-
     # The networkId allows for logical seperation of networks
     # networkId: dev
     # networkId: test
@@ -139,30 +101,19 @@ peer:
     # Validator defines whether this peer is a validating peer or not, and if
     # it is enabled, what consensus plugin to load
-    validator:
-        enabled: true
-
-        consensus:
-            # Consensus plugin to use. The value is the name of the plugin, e.g. pbft, noops ( this value is case-insensitive)
-            # if the given value is not recognized, we will default to noops
-            plugin: noops
-
-            # total number of consensus messages which will be buffered per connection before delivery is rejected
-            buffersize: 1000
+    events:
+        # The address that the Event service will be enabled on the validator
+        address: 0.0.0.0:7053
 
-        events:
-            # The address that the Event service will be enabled on the validator
-            address: 0.0.0.0:7053
+        # total number of events that could be buffered without blocking the
+        # validator sends
+        buffersize: 100
 
-            # total number of events that could be buffered without blocking the
-            # validator sends
-            buffersize: 100
-
-            # milliseconds timeout for producer to send an event.
-            # if < 0, if buffer full, unblocks immediately and not send
-            # if 0, if buffer full, will block and guarantee the event will be sent out
-            # if > 0, if buffer full, blocks till timeout
-            timeout: 10
+        # milliseconds timeout for producer to send an event.
+        # if < 0, if buffer full, unblocks immediately and not send
+        # if 0, if buffer full, will block and guarantee the event will be sent out
+        # if > 0, if buffer full, blocks till timeout
+        timeout: 10
 
     # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!----
     # THIS HAS TO BE DONE IN THE CONTEXT OF BOOTSTRAP. TILL THAT
@@ -194,57 +145,14 @@ peer:
         # The server name use to verify the hostname returned by TLS handshake
         serverhostoverride:
 
-    # PKI member services properties
-    pki:
-        eca:
-            paddr: localhost:7054
-        tca:
-            paddr: localhost:7054
-        tlsca:
-            paddr: localhost:7054
-        tls:
-            enabled: false
-            rootcert:
-                file: tlsca.cert
-            # The server name use to verify the hostname returned by TLS handshake
-            serverhostoverride:
-
-    # Peer discovery settings. Controls how this peer discovers other peers
-    discovery:
-
-        # The root nodes are used for bootstrapping purposes, and generally
-        # supplied through ENV variables
-        # It can be either a single host or a comma separated list of hosts.
-        rootnode:
-
-        # The duration of time between attempts to asks peers for their connected peers
-        period: 5s
-
-        ## leaving this in for example of sub map entry
-        # testNodes:
-        #    - node   : 1
-        #      ip     : 127.0.0.1
-        #      port   : 7051
-        #    - node   : 2
-        #      ip     : 127.0.0.1
-        #      port   : 7051
-
-        # Should the discovered nodes and their reputations
-        # be stored in DB and persisted between restarts
-        persist: true
-
-        # the period in seconds with which the discovery
-        # tries to reconnect to successful nodes
-        # 0 means the nodes are not reconnected
-        touchPeriod: 6s
-
-        # the maximum nuber of nodes to reconnect to
-        # -1 for unlimited
-        touchMaxNodes: 100
-
-    # Path on the file system where peer will store data
+    # Path on the file system where peer will store data (eg ledger)
     fileSystemPath: /var/hyperledger/production
 
+    # Path on the file system where peer will find MSP local configurations
+    mspConfigPath: /var/hyperledger/msp
+
+    # Used with Go profiling tools only in non-production environments. In
+    # production, it should be disabled (eg enabled: false)
     profile:
         enabled: false
         listenAddress: 0.0.0.0:6060
@@ -290,6 +198,7 @@ vm:
                     max-size: "50m"
                     max-file: "5"
             Memory: 2147483648
+
 ###############################################################################
 #
 #    Chaincode section
 #
 ###############################################################################
@@ -362,8 +271,7 @@ chaincode:
         lccc: enable
         escc: enable
         vscc: enable
-###############################################################################
-#
+
 ###############################################################################
 #
 # Ledger section - ledger configuration encompases both the blockchain
@@ -375,41 +283,6 @@ ledger:
   blockchain:
 
   state:
-
-    # Control the number state deltas that are maintained. This takes additional
-    # disk space, but allow the state to be rolled backwards and forwards
-    # without the need to replay transactions.
-    deltaHistorySize: 500
-
-    # The data structure in which the state will be stored. Different data
-    # structures may offer different performance characteristics.
-    # Options are 'buckettree', 'trie' and 'raw'.
-    # ( Note:'raw' is experimental and incomplete. )
-    # If not set, the default data structure is the 'buckettree'.
-    # This CANNOT be changed after the DB has been created.
-    dataStructure:
-      # The name of the data structure is for storing the state
-      name: buckettree
-      # The data structure specific configurations
-      configs:
-        # configurations for 'bucketree'. These CANNOT be changed after the DB
-        # has been created. 'numBuckets' defines the number of bins that the
-        # state key-values are to be divided
-        numBuckets: 1000003
-        # 'maxGroupingAtEachLevel' defines the number of bins that are grouped
-        #together to construct next level of the merkle-tree (this is applied
-        # repeatedly for constructing the entire tree).
-        maxGroupingAtEachLevel: 5
-        # 'bucketCacheSize' defines the size (in MBs) of the cache that is used to keep
-        # the buckets (from root upto secondlast level) in memory. This cache helps
-        # in making state hash computation faster. A value less than or equals to zero
-        # leads to disabling this caching. This caching helps more if transactions
-        # perform significant writes.
-        bucketCacheSize: 100
-
-    # configurations for 'trie'
-    # 'tire' has no additional configurations exposed as yet
-
     # stateDatabase - options are "goleveldb", "CouchDB"
     # goleveldb - default state database stored in goleveldb.
     # CouchDB - store state database in CouchDB
@@ -436,45 +309,3 @@ security:
 
     # Can be SHA2 or SHA3.
     hashAlgorithm: SHA2
-
-################################################################################
-#
-#   SECTION: STATETRANSFER
-#
-#   - This applies to recovery behavior when the replica has detected
-#     a state transfer is required
-#
-#   - This might happen:
-#       - During a view change in response to a faulty primary
-#       - After a network outage which has isolated the replica
-#       - If the current blockchain/state is determined to be corrupt
-#
-################################################################################
-statetransfer:
-
-    # Should a replica attempt to fix damaged blocks?
-    # In general, this should be set to true, setting to false will cause
-    # the replica to panic, and require a human's intervention to intervene
-    # and fix the corruption
-    recoverdamage: true
-
-    # The number of blocks to retrieve per sync request
-    blocksperrequest: 20
-
-    # The maximum number of state deltas to attempt to retrieve
-    # If more than this number of deltas is required to play the state up to date
-    # then instead the state will be flagged as invalid, and a full copy of the state
-    # will be retrieved instead
-    maxdeltas: 200
-
-    # Timeouts
-    timeout:
-
-        # How long may returning a single block take
-        singleblock: 2s
-
-        # How long may returning a single state delta take
-        singlestatedelta: 2s
-
-        # How long may transferring the complete state take
-        fullstate: 60s
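Note on the relocated peer.events settings above: the three timeout behaviours the comments describe (drop when negative, block forever when zero, block up to the timeout when positive) map onto an ordinary buffered-channel send in Go. The sketch below is illustrative only and is not the fabric producer code; the channel, payload type and send helper are invented for this note.

package main

import (
    "errors"
    "time"
)

// send mimics the documented peer.events.timeout behaviour when the buffer is full.
func send(events chan<- string, e string, timeout time.Duration) error {
    switch {
    case timeout < 0: // unblock immediately and drop the event
        select {
        case events <- e:
            return nil
        default:
            return errors.New("buffer full, event dropped")
        }
    case timeout == 0: // block until the event is buffered
        events <- e
        return nil
    default: // block for at most the configured timeout
        select {
        case events <- e:
            return nil
        case <-time.After(timeout):
            return errors.New("buffer full, send timed out")
        }
    }
}

func main() {
    events := make(chan string, 100) // buffersize: 100
    if err := send(events, "block-event", 10*time.Millisecond); err != nil {
        panic(err)
    }
}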
diff --git a/peer/node/start.go b/peer/node/start.go
index df145e4fae2..70ff45117a9 100755
--- a/peer/node/start.go
+++ b/peer/node/start.go
@@ -83,11 +83,8 @@ func serve(args []string) error {
     // cached. Failures to cache cause the server to terminate immediately.
     if chaincodeDevMode {
         logger.Info("Running in chaincode development mode")
-        logger.Info("Set consensus to NOOPS and user starts chaincode")
         logger.Info("Disable loading validity system chaincode")
 
-        viper.Set("peer.validator.enabled", "true")
-        viper.Set("peer.validator.consensus", "noops")
         viper.Set("chaincode.mode", chaincode.DevModeUserRunsChaincode)
     }
 
@@ -185,9 +182,8 @@ func serve(args []string) error {
         defer noopssinglechain.StopDeliveryService(deliverService)
     }
 
-    logger.Infof("Starting peer with ID=%s, network ID=%s, address=%s, rootnodes=%v, validator=%v",
-        peerEndpoint.ID, viper.GetString("peer.networkId"), peerEndpoint.Address,
-        viper.GetString("peer.discovery.rootnode"), peer.ValidatorEnabled())
+    logger.Infof("Starting peer with ID=%s, network ID=%s, address=%s",
+        peerEndpoint.ID, viper.GetString("peer.networkId"), peerEndpoint.Address)
 
     // Start the grpc server. Done in a goroutine so we can deploy the
     // genesis block if needed.
@@ -221,6 +217,7 @@ func serve(args []string) error {
         go ehubGrpcServer.Serve(ehubLis)
     }
 
+    // Start profiling http endpoint if enabled
     if viper.GetBool("peer.profile.enabled") {
         go func() {
             profileListenAddress := viper.GetString("peer.profile.listenAddress")
@@ -270,32 +267,30 @@ func createEventHubServer() (net.Listener, *grpc.Server, error) {
     var lis net.Listener
     var grpcServer *grpc.Server
     var err error
-    if peer.ValidatorEnabled() {
-        lis, err = net.Listen("tcp", viper.GetString("peer.validator.events.address"))
-        if err != nil {
-            return nil, nil, fmt.Errorf("failed to listen: %v", err)
-        }
+    lis, err = net.Listen("tcp", viper.GetString("peer.events.address"))
+    if err != nil {
+        return nil, nil, fmt.Errorf("failed to listen: %v", err)
+    }
 
-        //TODO - do we need different SSL material for events ?
-        var opts []grpc.ServerOption
-        if comm.TLSEnabled() {
-            creds, err := credentials.NewServerTLSFromFile(
-                viper.GetString("peer.tls.cert.file"),
-                viper.GetString("peer.tls.key.file"))
+    //TODO - do we need different SSL material for events ?
+    var opts []grpc.ServerOption
+    if comm.TLSEnabled() {
+        creds, err := credentials.NewServerTLSFromFile(
+            viper.GetString("peer.tls.cert.file"),
+            viper.GetString("peer.tls.key.file"))
 
-            if err != nil {
-                return nil, nil, fmt.Errorf("Failed to generate credentials %v", err)
-            }
-            opts = []grpc.ServerOption{grpc.Creds(creds)}
+        if err != nil {
+            return nil, nil, fmt.Errorf("Failed to generate credentials %v", err)
         }
+        opts = []grpc.ServerOption{grpc.Creds(creds)}
+    }
 
-        grpcServer = grpc.NewServer(opts...)
-        ehServer := producer.NewEventsServer(
-            uint(viper.GetInt("peer.validator.events.buffersize")),
-            viper.GetInt("peer.validator.events.timeout"))
+    grpcServer = grpc.NewServer(opts...)
+    ehServer := producer.NewEventsServer(
+        uint(viper.GetInt("peer.events.buffersize")),
+        viper.GetInt("peer.events.timeout"))
 
-        pb.RegisterEventsServer(grpcServer, ehServer)
-    }
+    pb.RegisterEventsServer(grpcServer, ehServer)
 
     return lis, grpcServer, err
 }
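Note on the profiling endpoint referenced by the new comment in serve(): it follows the standard net/http/pprof pattern, gated by peer.profile.enabled and bound to peer.profile.listenAddress. The sketch below is a simplified, standalone approximation rather than the exact fabric code; startProfilingIfEnabled is a hypothetical helper name used only for this note.

package main

import (
    "log"
    "net/http"
    _ "net/http/pprof" // registers the /debug/pprof/* handlers on http.DefaultServeMux

    "github.com/spf13/viper"
)

// startProfilingIfEnabled mirrors what serve() does when profiling is enabled.
func startProfilingIfEnabled() {
    if !viper.GetBool("peer.profile.enabled") {
        return
    }
    addr := viper.GetString("peer.profile.listenAddress") // e.g. 0.0.0.0:6060
    go func() {
        if err := http.ListenAndServe(addr, nil); err != nil {
            log.Printf("profiling endpoint exited: %v", err)
        }
    }()
}

func main() {
    viper.Set("peer.profile.enabled", true)
    viper.Set("peer.profile.listenAddress", "0.0.0.0:6060")
    startProfilingIfEnabled()
    select {} // keep the process alive so /debug/pprof/ stays reachable
}

With the endpoint listening, a CPU profile can be collected with the standard tooling, for example: go tool pprof http://localhost:6060/debug/pprof/profile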