From c20443bd584bc6c3ad18217b16ca0b550fbb30e1 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 20:38:22 -0500 Subject: [PATCH 01/94] Turning off bnet long running tests to allow other long running tests to complete in the 1 hour timeslot. --- tests/CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 68116bab863..f5a3b90fe76 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -84,8 +84,6 @@ add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh WORKING_DIRECTORY ${C # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_sanity_bnet_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_remote_lr_test COMMAND tests/nodeos_run_remote_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -100,9 +98,6 @@ set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_voting_bnet_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) 
-set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) - add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) From b0b4fdae2f3514b325a49d5a2494e784f2fb66ad Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 22 Mar 2019 13:40:05 -0400 Subject: [PATCH 02/94] Remove setting CMAKE_OSX_SYSROOT Setting CMAKE_OSX_SYSROOT has shown to cause build failures on fresh macos 10.13 installs --- CMakeLists.txt | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f9375f0f8b9..00258c4b86d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,14 +14,8 @@ endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") -if (UNIX) - if (APPLE) - execute_process(COMMAND xcrun --show-sdk-path - OUTPUT_VARIABLE CMAKE_OSX_SYSROOT - OUTPUT_STRIP_TRAILING_WHITESPACE) - list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4") - list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/gettext") - endif() +if (UNIX AND APPLE) + list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4" "/usr/local/opt/gettext") endif() include( GNUInstallDirs ) From cb98d8f756098547b8a35186c49cd8f138cbeee1 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Feb 2019 08:10:05 -0600 Subject: [PATCH 03/94] Added ability to configure nodes that are not launched immediately. 
--- programs/eosio-launcher/main.cpp | 53 +++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 8a3a75a721b..066305e4122 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -247,6 +247,7 @@ class tn_node_def { vector producers; eosd_def* instance; string gelf_endpoint; + bool dont_start; }; void @@ -390,6 +391,8 @@ string producer_names::producer_name(unsigned int producer_number) { struct launcher_def { bool force_overwrite; size_t total_nodes; + size_t unstarted_nodes; + size_t total_nodes; size_t prod_nodes; size_t producers; size_t next_node; @@ -481,6 +484,7 @@ launcher_def::set_options (bpo::options_description &cfg) { cfg.add_options() ("force,f", bpo::bool_switch(&force_overwrite)->default_value(false), "Force overwrite of existing configuration files and erase blockchain") ("nodes,n",bpo::value(&total_nodes)->default_value(1),"total number of nodes to configure and launch") + ("unstarted-nodes",bpo::value(&unstarted_nodes)->default_value(0),"total number of nodes to configure, but not launch") ("pnodes,p",bpo::value(&prod_nodes)->default_value(1),"number of nodes that contain one or more producers") ("producers",bpo::value(&producers)->default_value(21),"total number of non-bios producer instances in this network") ("mode,m",bpo::value>()->multitoken()->default_value({"any"}, "any"),"connection mode, combination of \"any\", \"producers\", \"specified\", \"none\"") @@ -634,7 +638,31 @@ launcher_def::initialize (const variables_map &vmap) { if (prod_nodes > (producers + 1)) prod_nodes = producers; if (prod_nodes > total_nodes) - total_nodes = prod_nodes; + total_nodes = prod_nodes + unstarted_nodes; + else if (total_nodes < prod_nodes + unstarted_nodes) { + cerr << "ERROR: if provided, \"--nodes\" must be equal or greater than the number of nodes indicated by \"--pnodes\" and \"--unstarted-nodes\"." 
<< endl; + exit (-1); + } + + if (vmap.count("specific-num")) { + const auto specific_nums = vmap["specific-num"].as>(); + const auto specific_args = vmap["specific-nodeos"].as>(); + if (specific_nums.size() != specific_args.size()) { + cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl; + exit (-1); + } + // don't include bios + const auto allowed_nums = total_nodes - 1; + for(uint i = 0; i < specific_nums.size(); ++i) + { + const auto& num = specific_nums[i]; + if (num >= allowed_nums) { + cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; + exit (-1); + } + specific_nodeos_args[num] = specific_args[i]; + } + } char* erd_env_var = getenv ("EOSIO_HOME"); if (erd_env_var == nullptr || std::string(erd_env_var).empty()) { @@ -733,7 +761,7 @@ launcher_def::generate () { write_dot_file (); if (!output.empty()) { - bfs::path savefile = output; + bfs::path savefile = output; { bfs::ofstream sf (savefile); sf << fc::json::to_pretty_string (network) << endl; @@ -754,6 +782,7 @@ launcher_def::generate () { } return false; } + return true; } @@ -864,6 +893,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; + const auto to_not_start_node = total_nodes - unstarted_nodes - 1; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; @@ -894,6 +924,7 @@ launcher_def::bind_nodes () { ++producer_number; } } + node.dont_start = i >= to_not_start_node; } node.gelf_endpoint = gelf_endpoint; network.nodes[node.name] = move(node); @@ -1564,6 +1595,10 @@ launcher_def::launch (eosd_def &instance, string >s) { } if (!host->is_local()) { + if (instance.node->dont_start) { + cerr << "Unable to use \"unstarted-nodes\" with a remote hose" << endl; + exit (-1); + } string cmdl ("cd "); cmdl += host->eosio_home + "; nohup " + eosdcmd + " > " + reout.string() + " 2> 
" + reerr.string() + "& echo $! > " + pidf.string() @@ -1578,7 +1613,7 @@ launcher_def::launch (eosd_def &instance, string >s) { string cmd = "cd " + host->eosio_home + "; kill -15 $(cat " + pidf.string() + ")"; format_ssh (cmd, host->host_name, info.kill_cmd); } - else { + else if (!instance.node->dont_start) { cerr << "spawning child, " << eosdcmd << endl; bp::child c(eosdcmd, bp::std_out > reout, bp::std_err > reerr ); @@ -1600,6 +1635,16 @@ launcher_def::launch (eosd_def &instance, string >s) { } c.detach(); } + else { + cerr << "not spawning child, " << eosdcmd << endl; + + const bfs::path dd = instance.data_dir_name; + const bfs::path start_file = dd / "start.cmd"; + bfs::ofstream sf (start_file); + + sf << eosdcmd << endl; + sf.close(); + } last_run.running_nodes.emplace_back (move(info)); } @@ -2046,7 +2091,7 @@ FC_REFLECT( eosd_def, (p2p_endpoint) ) // @ignore instance, gelf_endpoint -FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers) ) +FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers)(dont_start) ) FC_REFLECT( testnet_def, (name)(ssh_helper)(nodes) ) From 328473c9af665c780bf00163cb58e03e85e5d7b4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Feb 2019 11:42:31 -0600 Subject: [PATCH 04/94] Cleanup of scripts. --- tests/Cluster.py | 63 ++++++++++--------- tests/Node.py | 3 +- ...onsensus-validation-malicious-producers.py | 2 +- tests/distributed-transactions-test.py | 2 +- tests/testUtils.py | 22 ++++++- 5 files changed, 58 insertions(+), 34 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index d41d8e8731d..2c2486d48bf 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -31,7 +31,6 @@ class Cluster(object): __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" __configDir="etc/eosio/" - __dataDir="var/lib/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. 
If not load the wallet plugin @@ -128,11 +127,12 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, + def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, associatedNodeLabels=None): """Launch cluster. pnodes: producer nodes count + unstartedNodes: non-producer nodes that are configured into the launch, but not started totalNodes: producer + non-producer nodes count prodCount: producers per producer node count topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method) @@ -169,6 +169,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if pnodes > totalNodes: raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d)." % (totalNodes, pnodes)) + if pnodes + unstartedNodes > totalNodes: + raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d) + unstartedNodes(%d)." % (totalNodes, pnodes, unstartedNodes)) if self.walletMgr is None: self.walletMgr=WalletMgr(True) @@ -806,15 +808,6 @@ def nodeNameToId(name): m=re.search(r"node_([\d]+)", name) return int(m.group(1)) - @staticmethod - def nodeExtensionToName(ext): - r"""Convert node extension (bios, 0, 1, etc) to node name. """ - prefix="node_" - if ext == "bios": - return prefix + ext - - return "node_%02d" % (ext) - @staticmethod def parseProducerKeys(configFile, nodeName): """Parse node config file for producer keys. Returns dictionary. 
(Keys: account name; Values: dictionary objects (Keys: ["name", "node", "private","public"]; Values: account name, node id returned by nodeNameToId(nodeName), private key(string)and public key(string))).""" @@ -852,7 +845,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - configFile=Cluster.__configDir + Cluster.nodeExtensionToName(nodeNum) + "/config.ini" + configFile=Cluster.__configDir + Utils.nodeExtensionToName(nodeNum) + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -870,7 +863,7 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. Updates producer keys data members.""" - nodeName=Cluster.nodeExtensionToName("bios") + nodeName=Utils.nodeExtensionToName("bios") configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) producerKeys=Cluster.parseProducerKeys(configFile, nodeName) @@ -879,7 +872,7 @@ def parseClusterKeys(totalNodes): return None for i in range(0, totalNodes): - nodeName=Cluster.nodeExtensionToName(i) + nodeName=Utils.nodeExtensionToName(i) configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) @@ -1254,7 +1247,7 @@ def myFunc(): @staticmethod def pgrepEosServerPattern(nodeInstance): - dataLocation=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeInstance) + dataLocation=Utils.getNodeDataDir(nodeInstance) return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation) # Populates list of EosInstanceInfo objects, matched to actual running instances @@ -1272,18 +1265,30 @@ def discoverLocalNodes(self, totalNodes, timeout=None): psOutDisplay=psOut[:6660]+"..." 
if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) for i in range(0, totalNodes): - pattern=Cluster.pgrepEosServerPattern(i) - m=re.search(pattern, psOut, re.MULTILINE) - if m is None: - Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) + instance=self.discoverLocalNode(i, psOut) + if instance is None: break - instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes + # Populate a node matched to actual running instance + def discoverLocalNode(self, nodeNum, psOut=None): + if psOut is None: + psOut=Cluster.pgrepEosServers(timeout) + if psOut is None: + Utils.Print("ERROR: No nodes discovered.") + return nodes + pattern=Cluster.pgrepEosServerPattern(nodeNum) + m=re.search(pattern, psOut, re.MULTILINE) + if m is None: + Utils.Print("ERROR: Failed to find %s pid. 
Pattern %s" % (Utils.EosServerName, pattern)) + return None + instance=Node(self.host, self.port + nodeNum, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Node>", instance) + return instance + def discoverBiosNodePid(self, timeout=None): psOut=Cluster.pgrepEosServers(timeout=timeout) pattern=Cluster.pgrepEosServerPattern("bios") @@ -1348,20 +1353,20 @@ def __findFiles(path): return files def dumpErrorDetails(self): - fileName=os.path.join(Cluster.__configDir + Cluster.nodeExtensionToName("bios"), "config.ini") + fileName=os.path.join(Cluster.__configDir + Utils.nodeExtensionToName("bios"), "config.ini") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + path=Utils.getNodeDataDir("bios") fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + configLocation=Cluster.__configDir + Utils.nodeExtensionToName(i) fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) fileName=os.path.join(configLocation, "genesis.json") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + path=Utils.getNodeDataDir(i) fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) @@ -1435,7 +1440,7 @@ def waitForNextBlock(self, timeout=None): return node.waitForNextBlock(timeout) def cleanup(self): - for f in glob.glob(Cluster.__dataDir + "node_*"): + for f in glob.glob(Utils.DataDir + "node_*"): shutil.rmtree(f) for f in glob.glob(Cluster.__configDir + "node_*"): shutil.rmtree(f) @@ -1510,7 +1515,7 @@ def printBlockLogIfNeeded(self): self.printBlockLog() def getBlockLog(self, nodeExtension): - blockLogDir=Cluster.__dataDir 
+ Cluster.nodeExtensionToName(nodeExtension) + "/blocks/" + blockLogDir=os.path.join(Utils.getNodeDataDir(nodeExtension), "blocks", "") return Utils.getBlockLog(blockLogDir, exitOnError=False) def printBlockLog(self): @@ -1600,8 +1605,8 @@ def compareCommon(blockLogs, blockNameExtensions, first, last): if Utils.Debug: Utils.Print("context=%s" % (context)) ret=Utils.compare(commonBlockLogs[0], commonBlockLogs[i], context) if ret is not None: - blockLogDir1=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" - blockLogDir2=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" + blockLogDir1=Utils.DataDir + Utils.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" + blockLogDir2=Utils.DataDir + Utils.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" Utils.Print(Utils.FileDivider) Utils.Print("Block log from %s:\n%s" % (blockLogDir1, json.dumps(commonBlockLogs[0], indent=1))) Utils.Print(Utils.FileDivider) diff --git a/tests/Node.py b/tests/Node.py index 1c01893ceca..ab0859c7b0d 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1334,8 +1334,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim dataDir="var/lib/node_%02d" % (nodeId) dt = datetime.datetime.now() - dateStr="%d_%02d_%02d_%02d_%02d_%02d" % ( - dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) + dateStr=Utils.getDateString(dt) stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index 971228854d9..e3c6d7fe50e 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -246,7 +246,7 @@ def myTest(transWillEnterBlock): topo="mesh" delay=0 Print("Stand up cluster") - if 
cluster.launch(pnodes, total_nodes, topo, delay) is False: + if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay) is False: error("Failed to stand up eos cluster.") return False diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 5b302dcf141..c3b794b89c0 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -63,7 +63,7 @@ (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/testUtils.py b/tests/testUtils.py index 9e7e9c604be..38719fb8455 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -36,6 +36,7 @@ class Utils: EosBlockLogPath="programs/eosio-blocklog/eosio-blocklog" FileDivider="=================================================================" + DataDir="var/lib/" @staticmethod def Print(*args, **kwargs): @@ -65,6 +66,24 @@ def setIrreversibleTimeout(timeout): def setSystemWaitTimeout(timeout): Utils.systemWaitTimeout=timeout + @staticmethod + def getDateString(dt): + return "%d_%02d_%02d_%02d_%02d_%02d" % ( + dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) + + @staticmethod + def nodeExtensionToName(ext): + r"""Convert node extension (bios, 0, 1, etc) to node name. 
""" + prefix="node_" + if ext == "bios": + return prefix + ext + + return "node_%02d" % (ext) + + @staticmethod + def getNodeDataDir(ext): + return os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + @staticmethod def getChainStrategies(): chainSyncStrategies={} @@ -180,7 +199,8 @@ def runCmdArrReturnJson(cmdArr, trace=False, silentErrors=True): @staticmethod def runCmdReturnStr(cmd, trace=False): - retStr=Utils.checkOutput(cmd.split()) + cmdArr=shlex.split(cmd) + retStr=Utils.checkOutput(cmdArr) if trace: Utils.Print ("RAW > %s" % (retStr)) return retStr From 5aa5835e8897fd38fd0fdffc6434bf037af7d3cc Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 25 Feb 2019 08:36:08 -0600 Subject: [PATCH 05/94] Added config dir and data dir utils methods. --- tests/Cluster.py | 19 +++++++++---------- tests/testUtils.py | 19 +++++++++++++++++-- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 2c2486d48bf..0e16c803f05 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -30,7 +30,6 @@ class Cluster(object): __BiosPort=8788 __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" - __configDir="etc/eosio/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -845,7 +844,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - configFile=Cluster.__configDir + Utils.nodeExtensionToName(nodeNum) + "/config.ini" + configFile=Utils.getNodeConfigDir(nodeNum, "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -863,19 +862,19 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. 
Updates producer keys data members.""" - nodeName=Utils.nodeExtensionToName("bios") - configFile=Cluster.__configDir + nodeName + "/config.ini" + configFile=Utils.getNodeConfigDir("bios", "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + nodeName=Utils.nodeExtensionToName("bios") producerKeys=Cluster.parseProducerKeys(configFile, nodeName) if producerKeys is None: Utils.Print("ERROR: Failed to parse eosio private keys from cluster config files.") return None for i in range(0, totalNodes): - nodeName=Utils.nodeExtensionToName(i) - configFile=Cluster.__configDir + nodeName + "/config.ini" + configFile=Utils.getNodeConfigDir(i, "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + nodeName=Utils.nodeExtensionToName(i) keys=Cluster.parseProducerKeys(configFile, nodeName) if keys is not None: producerKeys.update(keys) @@ -1353,7 +1352,7 @@ def __findFiles(path): return files def dumpErrorDetails(self): - fileName=os.path.join(Cluster.__configDir + Utils.nodeExtensionToName("bios"), "config.ini") + fileName=Utils.getNodeConfigDir("bios", "config.ini") Cluster.dumpErrorDetailImpl(fileName) path=Utils.getNodeDataDir("bios") fileNames=Cluster.__findFiles(path) @@ -1361,7 +1360,7 @@ def dumpErrorDetails(self): Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Utils.nodeExtensionToName(i) + configLocation=Utils.getNodeConfigDir(i) fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) fileName=os.path.join(configLocation, "genesis.json") @@ -1442,7 +1441,7 @@ def waitForNextBlock(self, timeout=None): def cleanup(self): for f in glob.glob(Utils.DataDir + "node_*"): shutil.rmtree(f) - for f in glob.glob(Cluster.__configDir + "node_*"): + for f in glob.glob(Utils.ConfigDir + "node_*"): shutil.rmtree(f) for f in self.filesToCleanup: @@ -1515,7 +1514,7 @@ def printBlockLogIfNeeded(self): self.printBlockLog() def 
getBlockLog(self, nodeExtension): - blockLogDir=os.path.join(Utils.getNodeDataDir(nodeExtension), "blocks", "") + blockLogDir=Utils.getNodeDataDir(nodeExtension, "blocks") return Utils.getBlockLog(blockLogDir, exitOnError=False) def printBlockLog(self): diff --git a/tests/testUtils.py b/tests/testUtils.py index 38719fb8455..107be3f087a 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -37,6 +37,7 @@ class Utils: FileDivider="=================================================================" DataDir="var/lib/" + ConfigDir="etc/eosio/" @staticmethod def Print(*args, **kwargs): @@ -81,8 +82,22 @@ def nodeExtensionToName(ext): return "node_%02d" % (ext) @staticmethod - def getNodeDataDir(ext): - return os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + def getNodeDataDir(ext, relativeDir=None, trailingSlash=False): + path=os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + if relativeDir is not None: + path=os.path.join(path, relativeDir) + if trailingSlash: + path=os.path.join(path, "") + return path + + @staticmethod + def getNodeConfigDir(ext, relativeDir=None, trailingSlash=False): + path=os.path.join(Utils.ConfigDir, Utils.nodeExtensionToName(ext)) + if relativeDir is not None: + path=os.path.join(path, relativeDir) + if trailingSlash: + path=os.path.join(path, "") + return path @staticmethod def getChainStrategies(): From 50ea21fa9a6ddfc1feb46926fbcfa438975583ba Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 26 Feb 2019 08:44:31 -0600 Subject: [PATCH 06/94] Refactoring relaunch logic to allow for a general launch via a command line. 
--- tests/Node.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index ab0859c7b0d..802aa35e9df 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1332,19 +1332,8 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim myCmd=" ".join(cmdArr) - dataDir="var/lib/node_%02d" % (nodeId) - dt = datetime.datetime.now() - dateStr=Utils.getDateString(dt) - stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) - stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) - with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: - cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) - Utils.Print("cmd: %s" % (cmd)) - popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) - if cachePopen: - self.popenProc=popen - self.pid=popen.pid - if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) + self.launchCmd(cmd, nodeId) def isNodeAlive(): """wait for node to be responsive.""" @@ -1366,6 +1355,20 @@ def isNodeAlive(): self.killed=False return True + def launchCmd(self, cmd, nodeId): + dataDir=Utils.getNodeDataDir(nodeId) + dt = datetime.datetime.now() + dateStr=Utils.getDateString(dt) + stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) + stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) + with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: + Utils.Print("cmd: %s" % (cmd)) + popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) + if cachePopen: + self.popenProc=popen + self.pid=popen.pid + if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + def trackCmdTransaction(self, trans, ignoreNonTrans=False): if trans is None: if Utils.Debug: Utils.Print(" cmd returned transaction: %s" % (trans)) From 7206f767e3a08d4ada52c86c89816e0a4f679783 Mon Sep 17 
00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:40:27 -0600 Subject: [PATCH 07/94] Fixed initialization of bios node and fixed merge error. --- programs/eosio-launcher/main.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 066305e4122..51a0808103b 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -247,7 +247,7 @@ class tn_node_def { vector producers; eosd_def* instance; string gelf_endpoint; - bool dont_start; + bool dont_start = false; }; void @@ -392,7 +392,6 @@ struct launcher_def { bool force_overwrite; size_t total_nodes; size_t unstarted_nodes; - size_t total_nodes; size_t prod_nodes; size_t producers; size_t next_node; @@ -893,7 +892,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; - const auto to_not_start_node = total_nodes - unstarted_nodes - 1; + const auto to_not_start_node = total_nodes - unstarted_nodes; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; From 5c71b5d5e75eeb30bcbb5cf1906570b248b1f4a3 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:42:33 -0600 Subject: [PATCH 08/94] Fixed error in launchCmd refactor. 
GH #6727 --- tests/Node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 802aa35e9df..8f15ba5fece 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1333,7 +1333,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim myCmd=" ".join(cmdArr) cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) - self.launchCmd(cmd, nodeId) + self.launchCmd(cmd, nodeId, cachePopen) def isNodeAlive(): """wait for node to be responsive.""" @@ -1355,7 +1355,7 @@ def isNodeAlive(): self.killed=False return True - def launchCmd(self, cmd, nodeId): + def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) dt = datetime.datetime.now() dateStr=Utils.getDateString(dt) From 5f86a9d20bc8ba4014a0397531104e7deb6bae57 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:43:25 -0600 Subject: [PATCH 09/94] Fixed errors from previous attempt to explicitly set parameters. GH #6727 --- tests/consensus-validation-malicious-producers.py | 2 +- tests/distributed-transactions-test.py | 2 +- tests/restart-scenarios-test.py | 2 +- tests/validate-dirty-db.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index e3c6d7fe50e..6a3ac94d511 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -246,7 +246,7 @@ def myTest(transWillEnterBlock): topo="mesh" delay=0 Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: error("Failed to stand up eos cluster.") return False diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index c3b794b89c0..2ea4edfe462 100755 --- 
a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -63,7 +63,7 @@ (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index 6b3c217d75d..894a7d0d271 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -66,7 +66,7 @@ pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/validate-dirty-db.py b/tests/validate-dirty-db.py index ac7520bc353..afcf2767b73 100755 --- a/tests/validate-dirty-db.py +++ b/tests/validate-dirty-db.py @@ -74,7 +74,7 @@ def runNodeosAndGetOutput(myTimeout=3): pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False: errorExit("Failed to stand up eos cluster.") node=cluster.getNode(0) From b6852154701c309c389de6501b9ca8f7cf8f213d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 07:52:30 -0600 Subject: [PATCH 10/94] Cleanup. 
--- tests/nodeos_forked_chain_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 4ef22ab082f..a7f2c777e3c 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -7,7 +7,6 @@ from WalletMgr import WalletMgr from Node import BlockType from Node import Node -from TestHelper import AppArgs from TestHelper import TestHelper import decimal From 87e9e61cc48c80505f650bf8cf3c54fe12de7bf1 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 07:53:25 -0600 Subject: [PATCH 11/94] Added support for adding true flag. --- tests/TestHelper.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index a9920a731c1..768fccef890 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -22,6 +22,11 @@ def add(self, flag, type, help, default, choices=None): arg=self.AppArg(flag, type, help, default, choices) self.args.append(arg) + + def add_bool(self, flag, help, action='store_true'): + arg=self.AppArg(flag=flag, help=help, action=action) + self.args.append(arg) + # pylint: disable=too-many-instance-attributes class TestHelper(object): LOCAL_HOST="localhost" From a5ab5ba9ec1007f02d57c7357ca7abf420748ba4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 08:00:40 -0600 Subject: [PATCH 12/94] Fixing logic for launching started and unstarted nodes. --- tests/Cluster.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 0e16c803f05..258532331f1 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -131,8 +131,8 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me associatedNodeLabels=None): """Launch cluster. 
pnodes: producer nodes count - unstartedNodes: non-producer nodes that are configured into the launch, but not started - totalNodes: producer + non-producer nodes count + unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes. + totalNodes: producer + non-producer nodes + unstarted non-producer nodes count prodCount: producers per producer node count topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method) delay: delay between individual nodes launch (as defined by launcher) @@ -189,14 +189,14 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me tries = tries - 1 time.sleep(2) - cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s" % ( + cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s --unstarted-nodes %s" % ( Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], - p2pPlugin, producerFlag) + p2pPlugin, producerFlag, unstartedNodes) cmdArr=cmd.split() if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on \"*\" --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: @@ -262,7 +262,7 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me # of two entries - [ , ] with first being the name and second being the node definition shapeFileNodes = shapeFileObject["nodes"] - numProducers=totalProducers if totalProducers is not None else totalNodes + numProducers=totalProducers if totalProducers is not None else (totalNodes - unstartedNodes) maxProducers=ord('z')-ord('a')+1 assert numProducers Date: Sat, 9 Mar 2019 08:04:35 -0600 Subject: [PATCH 
13/94] Fixed txn_test_gen_plugin to allow using different prefixes for the test accounts. --- .../txn_test_gen_plugin.cpp | 82 +++++++++++-------- 1 file changed, 50 insertions(+), 32 deletions(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 60383175387..deea09ace55 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -101,6 +101,9 @@ struct txn_test_gen_plugin_impl { uint16_t thread_pool_size; optional thread_pool; std::shared_ptr timer; + name newaccountA; + name newaccountB; + name newaccountT; void push_next_transaction(const std::shared_ptr>& trxs, const std::function& next ) { chain_plugin& cp = app().get_plugin(); @@ -131,9 +134,6 @@ struct txn_test_gen_plugin_impl { trxs.reserve(2); try { - name newaccountA("txn.test.a"); - name newaccountB("txn.test.b"); - name newaccountC("txn.test.t"); name creator(init_name); abi_def currency_abi_def = fc::json::from_string(contracts::eosio_token_abi().data()).as(); @@ -170,12 +170,12 @@ struct txn_test_gen_plugin_impl { trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountB, owner_auth, active_auth}); } - //create "txn.test.t" account + //create "T" account { auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountC, owner_auth, active_auth}); + trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } trx.expiration = cc.head_block_time() + fc::seconds(30); @@ -184,55 +184,67 @@ struct txn_test_gen_plugin_impl { trxs.emplace_back(std::move(trx)); } - //set txn.test.t contract to eosio.token & initialize it + //set newaccountT contract to eosio.token & initialize it { 
signed_transaction trx; vector wasm = contracts::eosio_token_wasm(); setcode handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.code.assign(wasm.begin(), wasm.end()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); { setabi handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.abi = fc::raw::pack(json::from_string(contracts::eosio_token_abi().data()).as()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(create); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("create", fc::json::from_string("{\"issuer\":\"txn.test.t\",\"maximum_supply\":\"1000000000.0000 CUR\"}}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("create", + fc::json::from_string(fc::format_string("{\"issuer\":\"${issuer}\",\"maximum_supply\":\"1000000000.0000 CUR\"}}", + fc::mutable_variant_object()("issuer",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(issue); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("issue", fc::json::from_string("{\"to\":\"txn.test.t\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("issue", + fc::json::from_string(fc::format_string("{\"to\":\"${to}\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}", + 
fc::mutable_variant_object()("to",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.a\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountA.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.b\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountB.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } @@ -266,20 +278,20 @@ struct txn_test_gen_plugin_impl { auto abi_serializer_max_time = app().get_plugin().get_abi_serializer_max_time(); abi_serializer eosio_token_serializer{fc::json::from_string(contracts::eosio_token_abi().data()).as(), abi_serializer_max_time}; //create the actions here - 
act_a_to_b.account = N(txn.test.t); + act_a_to_b.account = newaccountT; act_a_to_b.name = N(transfer); - act_a_to_b.authorization = vector{{name("txn.test.a"),config::active_name}}; - act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + act_a_to_b.authorization = vector{{newaccountA,config::active_name}}; + act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"{to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("from",newaccountA.to_string())("to",newaccountB.to_string())("l", salt))), abi_serializer_max_time); - act_b_to_a.account = N(txn.test.t); + act_b_to_a.account = newaccountT; act_b_to_a.name = N(transfer); - act_b_to_a.authorization = vector{{name("txn.test.b"),config::active_name}}; - act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.b\",\"to\":\"txn.test.a\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + act_b_to_a.authorization = vector{{newaccountB,config::active_name}}; + act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("from",newaccountB.to_string())("to",newaccountA.to_string())("l", salt))), abi_serializer_max_time); timer_timeout = period; @@ -371,6 +383,7 @@ struct txn_test_gen_plugin_impl { next(e.dynamic_copy_exception()); } + ilog("send ${c} transactions", ("c",trxs.size())); push_transactions(std::move(trxs), next); } @@ -414,6 +427,7 @@ void txn_test_gen_plugin::set_program_options(options_description&, 
options_desc cfg.add_options() ("txn-reference-block-lag", bpo::value()->default_value(0), "Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block)") ("txn-test-gen-threads", bpo::value()->default_value(2), "Number of worker threads in txn_test_gen thread pool") + ("txn-test-gen-account-prefix", bpo::value()->default_value("txn.test."), "Prefix to use for accounts generated and used by this plugin") ; } @@ -422,6 +436,10 @@ void txn_test_gen_plugin::plugin_initialize(const variables_map& options) { my.reset( new txn_test_gen_plugin_impl ); my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as(); my->thread_pool_size = options.at( "txn-test-gen-threads" ).as(); + const std::string thread_pool_account_prefix = options.at( "txn-test-gen-account-prefix" ).as(); + my->newaccountA = thread_pool_account_prefix + "a"; + my->newaccountB = thread_pool_account_prefix + "b"; + my->newaccountT = thread_pool_account_prefix + "t"; EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, "txn-test-gen-threads ${num} must be greater than 0", ("num", my->thread_pool_size) ); } FC_LOG_AND_RETHROW() From 52ac5788faea3696af3aea745ffcdc8e48dfad0a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 08:08:23 -0600 Subject: [PATCH 14/94] Pulled out curl processing into its own function and added functions for interacting with the test accounts. 
--- tests/Node.py | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 8f15ba5fece..77c3157b5dc 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1075,8 +1075,12 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head assert(isinstance(blockType, BlockType)) assert(isinstance(returnType, ReturnType)) basedOnLib="true" if blockType==BlockType.lib else "false" - cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \ - (self.endpointHttp, producer, whereInSequence, basedOnLib) + payload="{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }" % (producer, whereInSequence, basedOnLib) + return self.processCurlCmd("test_control", "kill_node_on_producer", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + + def processCurlCmd(self, resource, command, payload, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + cmd="curl %s/v1/%s/%s -d '%s' -X POST -H \"Content-Type: application/json\"" % \ + (self.endpointHttp, resource, command, payload) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) rtn=None start=time.perf_counter() @@ -1113,6 +1117,23 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head return rtn + def txnGenCreateTestAccounts(self, genAccount, genKey, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(genAccount, str)) + assert(isinstance(genKey, str)) + assert(isinstance(returnType, ReturnType)) + + payload="[ \"%s\", \"%s\" ]" % (genAccount, genKey) + return self.processCurlCmd("txn_test_gen", "create_test_accounts", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + + def txnGenStart(self, 
salt, period, batchSize, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(salt, str)) + assert(isinstance(period, int)) + assert(isinstance(batchSize, int)) + assert(isinstance(returnType, ReturnType)) + + payload="[ \"%s\", %d, %d ]" % (salt, period, batchSize) + return self.processCurlCmd("txn_test_gen", "start_generation", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False): if not waitForTransBlock: return trans @@ -1355,6 +1376,19 @@ def isNodeAlive(): self.killed=False return True + def launchUnstarted(self, nodeId, cachePopen=False): + startFile=Utils.getNodeDataDir(nodeId, "start.cmd") + if not os.path.exists(startFile): + Utils.Print("Cannot launch unstarted process since %s file does not exist" % startFile) + return False + + with open(startFile, 'r') as file: + cmd=file.read() + Utils.Print("launchUnstarted cmd: %s" % (cmd)) + + self.launchCmd(cmd, nodeId, cachePopen) + return True + def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) dt = datetime.datetime.now() From 277df3c0c5319d7ad38e0e44b7e3e2244a0a9b7e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 21:59:07 -0500 Subject: [PATCH 15/94] Fix error in variable substitution. 
--- plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index deea09ace55..780127efc15 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -130,6 +130,7 @@ struct txn_test_gen_plugin_impl { } void create_test_accounts(const std::string& init_name, const std::string& init_priv_key, const std::function& next) { + ilog("create_test_accounts"); std::vector trxs; trxs.reserve(2); @@ -282,7 +283,7 @@ struct txn_test_gen_plugin_impl { act_a_to_b.name = N(transfer); act_a_to_b.authorization = vector{{newaccountA,config::active_name}}; act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"{to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", fc::mutable_variant_object()("from",newaccountA.to_string())("to",newaccountB.to_string())("l", salt))), abi_serializer_max_time); From 3089f7039010ad6ba438cde1cb4055b2d80f1d5a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 22:01:36 -0500 Subject: [PATCH 16/94] Add option to not load system contract. 
GH #6727 --- tests/Cluster.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 258532331f1..8665deffb32 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -128,7 +128,7 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-statements def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, - associatedNodeLabels=None): + associatedNodeLabels=None, loadSystemContract=True): """Launch cluster. pnodes: producer nodes count unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes. @@ -147,6 +147,7 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } alternateVersionLabelsFile: Supply an alternate version labels file to use with associatedNodeLabels. associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. 
+ loadSystemContract: indicate whether the eosio.system contract should be loaded (setting this to False causes useBiosBootFile to be treated as False) """ assert(isinstance(topo, str)) if alternateVersionLabelsFile is not None: @@ -397,8 +398,10 @@ def connectGroup(group, producerNodes, bridgeNodes) : return True Utils.Print("Bootstrap cluster.") + if not loadSystemContract: + useBiosBootFile=False #ensure we use Cluster.bootstrap if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(startedNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios) + self.biosNode=Cluster.bootstrap(startedNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios, loadSystemContract) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False @@ -965,7 +968,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): return biosNode @staticmethod - def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False): + def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False, loadSystemContract=True): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" @@ -1187,17 +1190,18 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM (expectedAmount, actualAmount)) return None - contract="eosio.system" - contractDir="unittests/contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." 
% (contract)) - return None + if loadSystemContract: + contract="eosio.system" + contractDir="unittests/contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + return None - Node.validateTransaction(trans) + Node.validateTransaction(trans) initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) From 0532e5c787372453c44814a645cc522c3e6e6020 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 22:07:10 -0500 Subject: [PATCH 17/94] Add test to ensure catchup lockup does not occur. GH #6727 --- tests/nodeos_startup_catchup.py | 97 +++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100755 tests/nodeos_startup_catchup.py diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py new file mode 100755 index 00000000000..da75a72b23b --- /dev/null +++ b/tests/nodeos_startup_catchup.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +import testUtils +import time +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import Node +from TestHelper import AppArgs +from TestHelper import TestHelper + +import decimal +import math +import re + +############################################################### +# nodeos_startup_catchup +# Test configures a producing node and <--txn-plugins count> non-producing nodes with the +# txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them +# to the producing node. +# 1) After 10 seconds a new node is started. 
+# 2) 10 seconds later, that node is checked to see if it has caught up to the producing node and +# that node is killed and a new node is started. +# 3) Repeat step 2, <--catchup-count - 1> more times +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +from core_symbol import CORE_SYMBOL + +appArgs=AppArgs() +extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10) +extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=4) +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", + "-p","--p2p-plugin","--wallet-port"}, applicationSpecificArgs=appArgs) +Utils.Debug=args.v +pnodes=args.p if args.p > 0 else 1 +startedNonProdNodes = args.txn_gen_nodes if args.txn_gen_nodes >= 2 else 2 +cluster=Cluster(walletd=True) +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +prodCount=args.prod_count if args.prod_count > 1 else 2 +killAll=args.clean_run +p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port +catchupCount=args.catchup_count +totalNodes=startedNonProdNodes+pnodes+catchupCount + +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName=Utils.EosWalletName +ClientName="cleos" + +try: + TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) + + cluster.killall(allInstances=killAll) + cluster.cleanup() + specificExtraNodeosArgs={} + txnGenNodeNum=pnodes # next node after producer nodes + for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): + specificExtraNodeosArgs[nodeNum]="--plugin eosio::txn_test_gen_plugin --txn-test-gen-account-prefix txntestacct" + Print("Stand up cluster") + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, 
totalProducers=pnodes*prodCount, p2pPlugin=p2pPlugin, + useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False: + Utils.cmdError("launcher") + Utils.errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + txnGenNodes=[] + for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): + txnGenNodes.append(cluster.getNode(nodeNum)) + + txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) + time.sleep(20) + + for genNum in range(0, len(txnGenNodes)): + salt="%d" % genNum + txnGenNodes[genNum].txnGenStart(salt, 1000, 200) + + time.sleep(10) + + + testSuccessful=True + +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + +exit(0) From 97f777bbb073316680a2e5214ede61a1503c397c Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 22:57:01 -0500 Subject: [PATCH 18/94] Fixed launcher setup of unstarted nodes. GH #6727. 
--- programs/eosio-launcher/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 51a0808103b..35f12b94e75 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -892,7 +892,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; - const auto to_not_start_node = total_nodes - unstarted_nodes; + const auto to_not_start_node = total_nodes - unstarted_nodes - 1; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; From dd4d3a476cad1ffe8b13e09f154ffb5a5607f4de Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:01:01 -0500 Subject: [PATCH 19/94] Added python script handling for unstarted nodes. GH #6727. --- tests/Cluster.py | 31 +++++++++++++++++++++++++++++++ tests/Node.py | 27 ++++++++++++++------------- 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 8665deffb32..debfa1464cd 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -50,6 +50,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 """ self.accounts={} self.nodes={} + self.unstartedNodes=[] self.localCluster=localCluster self.wallet=None self.walletd=walletd @@ -379,6 +380,9 @@ def connectGroup(group, producerNodes, bridgeNodes) : self.nodes=nodes + if unstartedNodes > 0: + self.unstartedNodes=self.discoverUnstartedLocalNodes(unstartedNodes, totalNodes) + if onlyBios: biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort, walletMgr=self.walletMgr) if not biosNode.checkPulse(): @@ -645,6 +649,16 @@ def getNode(self, nodeId=0, exitOnError=True): def getNodes(self): return self.nodes + def launchUnstarted(self, numToLaunch=1, cachePopen=False): + assert(isinstance(numToLaunch, int)) + assert(numToLaunch>0) + launchList=self.unstartedNodes[:numToLaunch] + del 
self.unstartedNodes[:numToLaunch] + for node in launchList: + # the node number is indexed off of the started nodes list + node.launchUnstarted(len(self.nodes), cachePopen=cachePopen) + self.nodes.append(node) + # Spread funds across accounts with transactions spread through cluster nodes. # Validate transactions are synchronized on root node def spreadFunds(self, source, accounts, amount=1): @@ -1485,6 +1499,23 @@ def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000): return True + def discoverUnstartedLocalNodes(self, unstartedNodes, totalNodes): + unstarted=[] + firstUnstartedNode=totalNodes-unstartedNodes + for nodeId in range(firstUnstartedNode, totalNodes): + unstarted.append(self.discoverUnstartedLocalNode(nodeId)) + return unstarted + + def discoverUnstartedLocalNode(self, nodeId): + startFile=Node.unstartedFile(nodeId) + with open(startFile, 'r') as file: + cmd=file.read() + Utils.Print("unstarted local node cmd: %s" % (cmd)) + p=re.compile(r'^\s*(\w+)\s*=\s*([^\s](?:.*[^\s])?)\s*$') + instance=Node(self.host, port=self.port+nodeId, pid=None, cmd=cmd, walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Unstarted Node>", instance) + return instance + def getInfos(self, silentErrors=False, exitOnError=False): infos=[] for node in self.nodes: diff --git a/tests/Node.py b/tests/Node.py index 77c3157b5dc..7b3259ece53 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -62,7 +62,7 @@ def eosClientArgs(self): def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) - return "Host: %s, Port:%d" % (self.host, self.port) + return "Host: %s, Port:%d, Pid:%s" % (self.host, self.port, self.pid) @staticmethod def validateTransaction(trans): @@ -1095,6 +1095,8 @@ def processCurlCmd(self, resource, command, payload, silentErrors=True, exitOnEr if Utils.Debug: end=time.perf_counter() 
Utils.Print("cmd Duration: %.3f sec" % (end-start)) + printReturn=json.dumps(rtn) if returnType==ReturnType.json else rtn + Utils.Print("cmd returned: %s" % (printReturn)) except subprocess.CalledProcessError as ex: if not silentErrors: end=time.perf_counter() @@ -1241,12 +1243,12 @@ def myFunc(): self.killed=True return True - def interruptAndVerifyExitStatus(self): + def interruptAndVerifyExitStatus(self, timeout=15): if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd)) assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." % (self.cmd) self.popenProc.send_signal(signal.SIGINT) try: - outs, _ = self.popenProc.communicate(timeout=15) + outs, _ = self.popenProc.communicate(timeout=timeout) assert self.popenProc.returncode == 0, "Expected terminating \"%s\" to have an exit status of 0, but got %d" % (self.cmd, self.popenProc.returncode) except subprocess.TimeoutExpired: Utils.errorExit("Terminate call failed on node: %s" % (self.cmd)) @@ -1376,18 +1378,17 @@ def isNodeAlive(): self.killed=False return True - def launchUnstarted(self, nodeId, cachePopen=False): + @staticmethod + def unstartedFile(nodeId): + assert(isinstance(nodeId, int)) startFile=Utils.getNodeDataDir(nodeId, "start.cmd") if not os.path.exists(startFile): - Utils.Print("Cannot launch unstarted process since %s file does not exist" % startFile) - return False - - with open(startFile, 'r') as file: - cmd=file.read() - Utils.Print("launchUnstarted cmd: %s" % (cmd)) + Utils.errorExit("Cannot find unstarted node since %s file does not exist" % startFile) + return startFile - self.launchCmd(cmd, nodeId, cachePopen) - return True + def launchUnstarted(self, nodeId, cachePopen=False): + Utils.Print("launchUnstarted cmd: %s" % (self.cmd)) + self.launchCmd(self.cmd, nodeId, cachePopen) def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) @@ -1401,7 +1402,7 @@ def launchCmd(self, cmd, 
nodeId, cachePopen=False): if cachePopen: self.popenProc=popen self.pid=popen.pid - if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + if Utils.Debug: Utils.Print("start Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) def trackCmdTransaction(self, trans, ignoreNonTrans=False): if trans is None: From 03c2eaa624c45de9616d522add7be8fc9a13e3e0 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:11:05 -0500 Subject: [PATCH 20/94] Added starting up unstarted nodes and verifying catchup. GH #6727. --- tests/nodeos_startup_catchup.py | 78 ++++++++++++++++++++++++++++++--- 1 file changed, 73 insertions(+), 5 deletions(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index da75a72b23b..bc73392c702 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -5,6 +5,7 @@ import time from Cluster import Cluster from WalletMgr import WalletMgr +from Node import BlockType from Node import Node from TestHelper import AppArgs from TestHelper import TestHelper @@ -31,7 +32,7 @@ appArgs=AppArgs() extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10) -extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=4) +extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=2) args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", "-p","--p2p-plugin","--wallet-port"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v @@ -45,7 +46,7 @@ killAll=args.clean_run p2pPlugin=args.p2p_plugin walletPort=args.wallet_port -catchupCount=args.catchup_count +catchupCount=args.catchup_count if args.catchup_count > 0 else 1 totalNodes=startedNonProdNodes+pnodes+catchupCount 
walletMgr=WalletMgr(True, port=walletPort) @@ -69,7 +70,6 @@ Print("Stand up cluster") if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, p2pPlugin=p2pPlugin, useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False: - Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") Print("Validating system accounts after bootstrap") @@ -84,11 +84,79 @@ for genNum in range(0, len(txnGenNodes)): salt="%d" % genNum - txnGenNodes[genNum].txnGenStart(salt, 1000, 200) + txnGenNodes[genNum].txnGenStart(salt, 1500, 150) + time.sleep(1) + + node0=cluster.getNode(0) + + def lib(node): + return node.getBlockNum(BlockType.lib) + + def head(node): + return node.getBlockNum(BlockType.head) time.sleep(10) + retryCountMax=100 + for catchup_num in range(0, catchupCount): + lastLibNum=lib(node0) + lastHeadNum=head(node0) + lastCatchupLibNum=None + + cluster.launchUnstarted(cachePopen=True) + retryCount=0 + # verify that production node is advancing (sanity check) + while lib(node0)<=lastLibNum: + time.sleep(4) + retryCount+=1 + # give it some more time if the head is still moving forward + if retryCount>=20 or head(node0)<=lastHeadNum: + Utils.errorExit("Node 0 failing to advance lib. Was %s, now %s." % (lastLibNum, lib(node0))) + if Utils.Debug: Utils.Print("Node 0 head was %s, now %s. Waiting for lib to advance" % (lastLibNum, lib(node0))) + lastHeadNum=head(node0) + + catchupNode=cluster.getNodes()[-1] + time.sleep(9) + lastCatchupLibNum=lib(catchupNode) + lastCatchupHeadNum=head(catchupNode) + retryCount=0 + while lib(catchupNode)<=lastCatchupLibNum: + time.sleep(5) + retryCount+=1 + # give it some more time if the head is still moving forward + if retryCount>=100 or head(catchupNode)<=lastCatchupHeadNum: + Utils.errorExit("Catchup Node %s failing to advance lib. Was %s, now %s." 
% + (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) + if Utils.Debug: Utils.Print("Catchup Node %s head was %s, now %s. Waiting for lib to advance" % (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) + lastCatchupHeadNum=head(catchupNode) + + retryCount=0 + lastLibNum=lib(node0) + trailingLibNum=lastLibNum-lib(catchupNode) + lastHeadNum=head(node0) + libNotMovingCount=0 + while trailingLibNum>0: + delay=5 + time.sleep(delay) + libMoving=lib(catchupNode)>lastCatchupLibNum + if libMoving: + trailingLibNum=lastLibNum-lib(catchupNode) + libNotMovingCount=0 + else: + libNotMovingCount+=1 + if Utils.Debug and libNotMovingCount%10==0: + Utils.Print("Catchup node %s lib has not moved for %s seconds, lib is %s" % + (cluster.getNodes().index(catchupNode), (delay*libNotMovingCount), lib(catchupNode))) + retryCount+=1 + # give it some more time if the head is still moving forward + if retryCount>=retryCountMax or head(catchupNode)<=lastCatchupHeadNum or libNotMovingCount>100: + Utils.errorExit("Catchup Node %s failing to advance lib along with node 0. Catchup node lib is %s, node 0 lib is %s." % + (cluster.getNodes().index(catchupNode), lib(catchupNode), lastLibNum)) + if Utils.Debug: Utils.Print("Catchup Node %s head is %s, node 0 head is %s. Waiting for lib to advance from %s to %s" % (cluster.getNodes().index(catchupNode), head(catchupNode), head(node0), lib(catchupNode), lastLibNum)) + lastCatchupHeadNum=head(catchupNode) + + catchupNode.interruptAndVerifyExitStatus(60) + retryCountMax*=3 - testSuccessful=True finally: From fef0d2acb7e19193678d28692a9943cc95267f15 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:15:08 -0500 Subject: [PATCH 21/94] Changed api to return a json status to indicate what happened. GH #6727. 
--- .../txn_test_gen_plugin.cpp | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 780127efc15..670114ea85c 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -28,9 +28,13 @@ using namespace eosio::testing; namespace eosio { namespace detail { struct txn_test_gen_empty {}; + struct txn_test_gen_status { + string status; + }; }} FC_REFLECT(eosio::detail::txn_test_gen_empty, ); +FC_REFLECT(eosio::detail::txn_test_gen_status, (status)); namespace eosio { @@ -53,8 +57,8 @@ using io_work_t = boost::asio::executor_work_guard(); \ - api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ - eosio::detail::txn_test_gen_empty result; + auto status = api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ + eosio::detail::txn_test_gen_status result = { status }; #define INVOKE_V_R_R(api_handle, call_name, in_param0, in_param1) \ const auto& vs = fc::json::json::from_string(body).as(); \ @@ -179,7 +183,7 @@ struct txn_test_gen_plugin_impl { trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.sign(creator_priv_key, chainid); trxs.emplace_back(std::move(trx)); @@ -249,7 +253,7 @@ struct txn_test_gen_plugin_impl { trx.actions.push_back(act); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.max_net_usage_words = 5000; trx.sign(txn_test_receiver_C_priv_key, chainid); @@ -263,15 +267,17 @@ struct txn_test_gen_plugin_impl { push_transactions(std::move(trxs), next); } - void 
start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + string start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + ilog("Starting transaction test plugin"); if(running) - throw fc::exception(fc::invalid_operation_exception_code); + return "start_generation already running"; if(period < 1 || period > 2500) - throw fc::exception(fc::invalid_operation_exception_code); + return "period must be between 1 and 2500"; if(batch_size < 1 || batch_size > 250) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be between 1 and 250"; if(batch_size & 1) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be even"; + ilog("Starting transaction test plugin valid"); running = true; @@ -312,6 +318,7 @@ struct txn_test_gen_plugin_impl { boost::asio::post( *gen_ioc, [this]() { arm_timer(boost::asio::high_resolution_timer::clock_type::now()); }); + return "success"; } void arm_timer(boost::asio::high_resolution_timer::time_point s) { From 640257efc443e55a17984807cda4710fb34ce96e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 23 Mar 2019 00:03:42 -0500 Subject: [PATCH 22/94] Added nodeos_startup_catchup to long running tests. GH #6727. 
--- tests/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index ae9b36bcd68..0eea67cbce3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -35,6 +35,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CM configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_startup_catchup.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_startup_catchup.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) @@ -106,6 +107,8 @@ set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) if(ENABLE_COVERAGE_TESTING) From fcd01c82b01574f474ceaac068d84882a9cff4bb Mon Sep 17 00:00:00 2001 From: Brian Johnson 
Date: Sat, 23 Mar 2019 15:55:56 -0500 Subject: [PATCH 23/94] Fixed interruptAndVerifyExitStatus to track that it was killed. GH #6727. --- tests/Node.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 7b3259ece53..3e31c396d5f 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1253,6 +1253,10 @@ def interruptAndVerifyExitStatus(self, timeout=15): except subprocess.TimeoutExpired: Utils.errorExit("Terminate call failed on node: %s" % (self.cmd)) + # mark node as killed + self.pid=None + self.killed=True + def verifyAlive(self, silent=False): if not silent and Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd)) if self.killed or self.pid is None: @@ -1318,7 +1322,7 @@ def getNextCleanProductionCycle(self, trans): # TBD: make nodeId an internal property # pylint: disable=too-many-locals - def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False): + def relaunch(self, nodeId, chainArg=None, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False): assert(self.pid is None) assert(self.killed) From 2790d66b81dd021b335249dcb360ddf3eea5d2ba Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 23 Mar 2019 16:00:06 -0500 Subject: [PATCH 24/94] Added catchup after relaunching the catchup node and refactored test using framework methods. GH #6727. --- tests/nodeos_startup_catchup.py | 104 ++++++++++++-------------------- 1 file changed, 39 insertions(+), 65 deletions(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index bc73392c702..c7f1fa80ae4 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -20,9 +20,11 @@ # txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them # to the producing node. # 1) After 10 seconds a new node is started. 
-# 2) 10 seconds later, that node is checked to see if it has caught up to the producing node and -# that node is killed and a new node is started. -# 3) Repeat step 2, <--catchup-count - 1> more times +# 2) the node is allowed to catch up to the producing node +# 3) that node is killed +# 4) restart the node +# 5) the node is allowed to catch up to the producing node +# 3) Repeat steps 2-5, <--catchup-count - 1> more times ############################################################### Print=Utils.Print @@ -80,14 +82,6 @@ txnGenNodes.append(cluster.getNode(nodeNum)) txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) - time.sleep(20) - - for genNum in range(0, len(txnGenNodes)): - salt="%d" % genNum - txnGenNodes[genNum].txnGenStart(salt, 1500, 150) - time.sleep(1) - - node0=cluster.getNode(0) def lib(node): return node.getBlockNum(BlockType.lib) @@ -95,67 +89,47 @@ def lib(node): def head(node): return node.getBlockNum(BlockType.head) - time.sleep(10) - retryCountMax=100 - for catchup_num in range(0, catchupCount): - lastLibNum=lib(node0) - lastHeadNum=head(node0) - lastCatchupLibNum=None + node0=cluster.getNode(0) + blockNum=head(node0) + node0.waitForBlock(blockNum, blockType=BlockType.lib) + + for genNum in range(0, len(txnGenNodes)): + salt="%d" % genNum + txnGenNodes[genNum].txnGenStart(salt, 1500, 150) + time.sleep(1) + + blockNum=head(node0) + node0.waitForBlock(blockNum+20) + + twoRounds=21*2*12 + for catchup_num in range(0, catchupCount): cluster.launchUnstarted(cachePopen=True) - retryCount=0 - # verify that production node is advancing (sanity check) - while lib(node0)<=lastLibNum: - time.sleep(4) - retryCount+=1 - # give it some more time if the head is still moving forward - if retryCount>=20 or head(node0)<=lastHeadNum: - Utils.errorExit("Node 0 failing to advance lib. Was %s, now %s." % (lastLibNum, lib(node0))) - if Utils.Debug: Utils.Print("Node 0 head was %s, now %s. 
Waiting for lib to advance" % (lastLibNum, lib(node0))) - lastHeadNum=head(node0) + lastLibNum=lib(node0) + # verify producer lib is still advancing + node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) catchupNode=cluster.getNodes()[-1] - time.sleep(9) + catchupNodeNum=cluster.getNodes().index(catchupNode) lastCatchupLibNum=lib(catchupNode) - lastCatchupHeadNum=head(catchupNode) - retryCount=0 - while lib(catchupNode)<=lastCatchupLibNum: - time.sleep(5) - retryCount+=1 - # give it some more time if the head is still moving forward - if retryCount>=100 or head(catchupNode)<=lastCatchupHeadNum: - Utils.errorExit("Catchup Node %s failing to advance lib. Was %s, now %s." % - (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) - if Utils.Debug: Utils.Print("Catchup Node %s head was %s, now %s. Waiting for lib to advance" % (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) - lastCatchupHeadNum=head(catchupNode) - - retryCount=0 - lastLibNum=lib(node0) - trailingLibNum=lastLibNum-lib(catchupNode) - lastHeadNum=head(node0) - libNotMovingCount=0 - while trailingLibNum>0: - delay=5 - time.sleep(delay) - libMoving=lib(catchupNode)>lastCatchupLibNum - if libMoving: - trailingLibNum=lastLibNum-lib(catchupNode) - libNotMovingCount=0 - else: - libNotMovingCount+=1 - if Utils.Debug and libNotMovingCount%10==0: - Utils.Print("Catchup node %s lib has not moved for %s seconds, lib is %s" % - (cluster.getNodes().index(catchupNode), (delay*libNotMovingCount), lib(catchupNode))) - retryCount+=1 - # give it some more time if the head is still moving forward - if retryCount>=retryCountMax or head(catchupNode)<=lastCatchupHeadNum or libNotMovingCount>100: - Utils.errorExit("Catchup Node %s failing to advance lib along with node 0. Catchup node lib is %s, node 0 lib is %s." 
% - (cluster.getNodes().index(catchupNode), lib(catchupNode), lastLibNum)) - if Utils.Debug: Utils.Print("Catchup Node %s head is %s, node 0 head is %s. Waiting for lib to advance from %s to %s" % (cluster.getNodes().index(catchupNode), head(catchupNode), head(node0), lib(catchupNode), lastLibNum)) - lastCatchupHeadNum=head(catchupNode) + # verify lib is advancing (before we wait for it to have to catchup with producer) + catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds + catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) catchupNode.interruptAndVerifyExitStatus(60) - retryCountMax*=3 + + catchupNode.relaunch(catchupNodeNum) + lastCatchupLibNum=lib(catchupNode) + # verify catchup node is advancing to producer + catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + lastLibNum=lib(node0) + # verify producer lib is still advancing + node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + # verify catchup node is advancing to producer + catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) testSuccessful=True From 722ac062fc3cc1469328bfc273c6b0a2964790a6 Mon Sep 17 00:00:00 2001 From: UMU Date: Mon, 25 Mar 2019 15:31:23 +0800 Subject: [PATCH 25/94] Improve for MongoDB sharding --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 29 ++++++++++++--------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 8131b6a2bb2..2ba100bdc84 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1462,39 +1462,44 @@ void mongo_db_plugin_impl::init() { } try { + // Due to the vast amounts of data, we suggest MongoDB administrators: + // 1. 
enableSharding database (default to EOS) + // 2. shardCollection: blocks, action_traces, transaction_traces, especially action_traces + // 3. Use compound index with shard key (default to _id), to improve query performance. + // blocks indexes auto blocks = mongo_conn[db_name][blocks_col]; - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" )); auto block_states = mongo_conn[db_name][block_states_col]; - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" )); // accounts indexes - accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" )); + accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1, "_id" : 1 })xxx" )); // transactions indexes auto trans = mongo_conn[db_name][trans_col]; - trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" )); + trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1, "_id" : 1 })xxx" )); auto trans_trace = mongo_conn[db_name][trans_traces_col]; - trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" )); + trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1, "_id" : 1 })xxx" )); // action traces indexes auto action_traces = mongo_conn[db_name][action_traces_col]; - action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); + action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); // pub_keys indexes auto pub_keys = 
mongo_conn[db_name][pub_keys_col]; - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" )); - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1, "_id" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1, "_id" : 1 })xxx" )); // account_controls indexes auto account_controls = mongo_conn[db_name][account_controls_col]; account_controls.create_index( - bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" )); - account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" )); + bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1, "_id" : 1 })xxx" )); + account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1, "_id" : 1 })xxx" )); } catch (...) { handle_mongo_exception( "create indexes", __LINE__ ); From 77f519f20bade5917ff7a9e4663ac38c648b7e78 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 07:17:51 -0500 Subject: [PATCH 26/94] Update fc to fc with set_os_thread_name --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 0c348cc9af4..73f2d256ed0 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 0c348cc9af47d71af57e6926fd64848594a78658 +Subproject commit 73f2d256ed04d6ad0e4b2ac2507b0e7981c51803 From d2c8a7e076e23b9f1f5cbe77383a6d90a5bb137f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:17 -0500 Subject: [PATCH 27/94] Name bnet threads --- plugins/bnet_plugin/bnet_plugin.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/bnet_plugin/bnet_plugin.cpp b/plugins/bnet_plugin/bnet_plugin.cpp index b788d833503..08d2091040f 100644 --- a/plugins/bnet_plugin/bnet_plugin.cpp +++ 
b/plugins/bnet_plugin/bnet_plugin.cpp @@ -51,6 +51,7 @@ #include #include +#include #include #include @@ -1398,7 +1399,13 @@ namespace eosio { my->_socket_threads.reserve( my->_num_threads ); for( auto i = 0; i < my->_num_threads; ++i ) { - my->_socket_threads.emplace_back( [&ioc]{ wlog( "start thread" ); ioc.run(); wlog( "end thread" ); } ); + my->_socket_threads.emplace_back( [&ioc, i]{ + std::string tn = "bnet-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + wlog( "start thread" ); + ioc.run(); + wlog( "end thread" ); + } ); } for( const auto& peer : my->_connect_to_peers ) { From f4889220e68ad1df90ece9976d362c3e2f5ecf4b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:34 -0500 Subject: [PATCH 28/94] Name http threads --- plugins/http_plugin/http_plugin.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 7e205736874..fe2b31472e7 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -522,7 +522,11 @@ namespace eosio { my->server_ioc = std::make_shared(); my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) ); for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { + std::string tn = "http-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc->run(); + } ); } if(my->listen_endpoint) { From ae9c3c9d3e1cc7d4001a4bd142d710cab6d5760e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:40 -0500 Subject: [PATCH 29/94] Name mongo_db_plugin consume thread --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 8131b6a2bb2..767d3b4f558 100644 
--- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -1523,7 +1524,10 @@ void mongo_db_plugin_impl::init() { ilog("starting db plugin thread"); - consume_thread = boost::thread([this] { consume_blocks(); }); + consume_thread = boost::thread([this] { + fc::set_os_thread_name( "mongodb" ); + consume_blocks(); + }); startup = false; } From df67f7d68670538303266dc49f20cd7a1b139778 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:18:27 -0500 Subject: [PATCH 30/94] Name main application thread --- programs/nodeos/main.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 7034a03858a..403b5c2b317 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -84,6 +84,7 @@ enum return_codes { int main(int argc, char** argv) { try { + fc::set_os_thread_name( "main" ); app().set_version(eosio::nodeos::config::version); auto root = fc::app_path(); From 804da252887981bb988c18abada21546d5e90151 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:20:04 -0500 Subject: [PATCH 31/94] Name net_plugin server_ioc threads --- plugins/net_plugin/net_plugin.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a26353ab387..c8e7bf20a6f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -3016,7 +3017,11 @@ namespace eosio { my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); // currently thread_pool only used for server_ioc for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { + 
std::string tn = "net-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc->run(); + } ); } my->resolver = std::make_shared( std::ref( *my->server_ioc )); From 6c79f2ee270484c3ff352d7146ba41194f3b5e69 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:20:39 -0500 Subject: [PATCH 32/94] Name all threads in chain controller thread pool --- libraries/chain/controller.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f3b0a841981..9ee8626a77f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -1727,7 +1728,25 @@ void controller::add_indices() { my->add_indices(); } +void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { + std::string tn = "chain-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ++i; + if( i < sz ) { + // post recursively so we consume all the threads + auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { + set_thread_name( tp, i, sz ); + }); + fut.wait(); + } +} + void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { + // name threads in thread pool for logger + boost::asio::post( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { + set_thread_name( tp, 0, sz ); + }); + my->head = my->fork_db.head(); if( snapshot ) { ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); From 9a34ba52a216fb34a37f95a223fe943eb1dfb48e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:21:16 -0500 Subject: [PATCH 33/94] Name all threads in producer thread pool --- plugins/producer_plugin/producer_plugin.cpp | 24 +++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 
a35fa34a9c5..0f9fc79ccea 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -8,9 +8,11 @@ #include #include #include +#include #include #include +#include #include #include @@ -620,6 +622,19 @@ make_keosd_signature_provider(const std::shared_ptr& impl, }; } +void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { + std::string tn = "prod-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ++i; + if( i < sz ) { + // post recursively so we consume all the threads + auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { + set_thread_name( tp, i, sz ); + }); + fut.wait(); + } +} + void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { my->chain_plug = app().find_plugin(); @@ -690,6 +705,11 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); my->_thread_pool.emplace( thread_pool_size ); + // name threads in thread pool for logger + boost::asio::post( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { + set_thread_name( tp, 0, sz ); + }); + if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); if( sd.is_relative()) { @@ -738,10 +758,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - handle_sighup(); // Sets loggers - ilog("producer plugin: plugin_startup() begin"); + handle_sighup(); // Sets loggers + chain::controller& chain = my->chain_plug->chain(); EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() == chain::db_read_mode::SPECULATIVE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is not \"speculative\"" ); From bf1bae2822d8ebb017d40926d7a34b04a1d959f2 Mon Sep 17 00:00:00 2001 From: Kevin 
Heifner Date: Tue, 26 Mar 2019 10:49:03 -0500 Subject: [PATCH 34/94] Revert move of ilog message --- plugins/producer_plugin/producer_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0f9fc79ccea..0754d1248c1 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -758,10 +758,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - ilog("producer plugin: plugin_startup() begin"); - handle_sighup(); // Sets loggers + ilog("producer plugin: plugin_startup() begin"); + chain::controller& chain = my->chain_plug->chain(); EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() == chain::db_read_mode::SPECULATIVE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is not \"speculative\"" ); From 32540b3eea84928e5f45740ee7ef51943f85d3a6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 12:10:59 -0500 Subject: [PATCH 35/94] Fix for tests which were destroying controller before all set_thread_name finished causing deadlock. 
--- libraries/chain/controller.cpp | 3 ++- plugins/producer_plugin/producer_plugin.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9ee8626a77f..fc71d06a42a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1743,9 +1743,10 @@ void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { // name threads in thread pool for logger - boost::asio::post( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { + auto fut = eosio::chain::async_thread_pool( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { set_thread_name( tp, 0, sz ); }); + fut.wait(); my->head = my->fork_db.head(); if( snapshot ) { diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0754d1248c1..84fb3866012 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -706,9 +706,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_thread_pool.emplace( thread_pool_size ); // name threads in thread pool for logger - boost::asio::post( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { + auto fut = eosio::chain::async_thread_pool( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { set_thread_name( tp, 0, sz ); }); + fut.wait(); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); From bb0646b62d2fbdc17db38c986b364250e880ff52 Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Tue, 26 Mar 2019 14:06:52 -0400 Subject: [PATCH 36/94] Create CONTRIBUTING.md --- CONTRIBUTING.md | 148 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 
insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000000..40ecbf9cea8 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to eos + +Interested in contributing? That's awesome! Here are some guidelines to get started quickly and easily: + +- [Reporting An Issue](#reporting-an-issue) + - [Bug Reports](#bug-reports) + - [Feature Requests](#feature-requests) + - [Change Requests](#change-requests) +- [Working on eos](#working-on-eos) + - [Feature Branches](#feature-branches) + - [Submitting Pull Requests](#submitting-pull-requests) + - [Testing and Quality Assurance](#testing-and-quality-assurance) +- [Conduct](#conduct) +- [Contributor License & Acknowledgments](#contributor-license--acknowledgments) +- [References](#references) + +## Reporting An Issue + +If you're about to raise an issue because you think you've found a problem with eos, or you'd like to make a request for a new feature in the codebase, or any other reason… please read this first. + +The GitHub issue tracker is the preferred channel for [bug reports](#bug-reports), [feature requests](#feature-requests), and [submitting pull requests](#submitting-pull-requests), but please respect the following restrictions: + +* Please **search for existing issues**. Help us keep duplicate issues to a minimum by checking to see if someone has already reported your problem or requested your idea. + +* Please **be civil**. Keep the discussion on topic and respect the opinions of others. See also our [Contributor Code of Conduct](#conduct). + +### Bug Reports + +A bug is a _demonstrable problem_ that is caused by the code in the repository. Good bug reports are extremely helpful - thank you! + +Guidelines for bug reports: + +1. **Use the GitHub issue search** — check if the issue has already been + reported. + +1. 
**Check if the issue has been fixed** — look for [closed issues in the + current milestone](https://github.com/EOSIO/eos/issues?q=is%3Aissue+is%3Aclosed) or try to reproduce it + using the latest `develop` branch. + +A good bug report shouldn't leave others needing to chase you up for more information. Be sure to include the details of your environment and relevant tests that demonstrate the failure. + +[Report a bug](https://github.com/EOSIO/eos/issues/new?title=Bug%3A) + +### Feature Requests + +Feature requests are welcome. Before you submit one be sure to have: + +1. **Use the GitHub search** and check the feature hasn't already been requested. +1. Take a moment to think about whether your idea fits with the scope and aims of the project. +1. Remember, it's up to *you* to make a strong case to convince the project's leaders of the merits of this feature. Please provide as much detail and context as possible, this means explaining the use case and why it is likely to be common. + +### Change Requests + +Change requests cover both architectural and functional changes to how eos works. If you have an idea for a new or different dependency, a refactor, or an improvement to a feature, etc - please be sure to: + +1. **Use the GitHub search** and check someone else didn't get there first +1. Take a moment to think about the best way to make a case for, and explain what you're thinking. Are you sure this shouldn't really be + a [bug report](#bug-reports) or a [feature request](#feature-requests)? Is it really one idea or is it many? What's the context? What problem are you solving? Why is what you are suggesting better than what's already there? + +## Working on eos + +Code contributions are welcome and encouraged! If you are looking for a good place to start, check out the [good first issue](https://github.com/EOSIO/eos/labels/good%20first%20issue) label in GitHub issues. 
+ +Also, please follow these guidelines when submitting code: + +### Feature Branches + +To get it out of the way: + +- **[develop](https://github.com/EOSIO/eos/tree/develop)** is the development branch. All work on the next release happens here so you should generally branch off `develop`. Do **NOT** use this branch for a production site. +- **[master](https://github.com/EOSIO/eos/tree/master)** contains the latest release of eos. This branch may be used in production. Do **NOT** use this branch to work on eos's source. + +### Submitting Pull Requests + +Pull requests are awesome. If you're looking to raise a PR for something which doesn't have an open issue, please think carefully about [raising an issue](#reporting-an-issue) which your PR can close, especially if you're fixing a bug. This makes it more likely that there will be enough information available for your PR to be properly tested and merged. + +### Testing and Quality Assurance + +Never underestimate just how useful quality assurance is. If you're looking to get involved with the code base and don't know where to start, checking out and testing a pull request is one of the most useful things you could do. + +Essentially, [check out the latest develop branch](#working-on-eos), take it for a spin, and if you find anything odd, please follow the [bug report guidelines](#bug-reports) and let us know! + +## Conduct + +While contributing, please be respectful and constructive, so that participation in our project is a positive experience for everyone. 
+ +Examples of behavior that contributes to creating a positive environment include: +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior include: +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others’ private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Contributor License & Acknowledgments + +Whenever you make a contribution to this project, you license your contribution under the same terms as set out in LICENSE, and you represent and warrant that you have the right to license your contribution under those terms. Whenever you make a contribution to this project, you also certify in the terms of the Developer’s Certificate of Origin set out below: + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +## References + +* Overall CONTRIB adapted from https://github.com/mathjax/MathJax/blob/master/CONTRIBUTING.md +* Conduct section adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html From 7faca6c888a2e90c8cf0bf74bec41852a342d3f1 Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Tue, 26 Mar 2019 14:07:20 -0400 Subject: [PATCH 37/94] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 1516b96cbdf..31dee1d933c 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 Respective Authors all rights reserved. +Copyright (c) 2017-2019 block.one all rights reserved. 
The MIT License From cee6dea42fce72c3d3f313a4a7438acfe8d9dc43 Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Tue, 26 Mar 2019 14:10:05 -0400 Subject: [PATCH 38/94] Update README.md --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index e22a2b2cebc..7bce246fbc3 100644 --- a/README.md +++ b/README.md @@ -105,3 +105,17 @@ EOSIO currently supports the following operating systems: ## Getting Started Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in [Getting Started](https://developers.eos.io/eosio-home/docs) on the [EOSIO Developer Portal](https://developers.eos.io). + +## Contributing + +[Contributing Guide](./CONTRIBUTING.md) + +[Code of Conduct](./CONTRIBUTING.md#conduct) + +## License + +[MIT](./LICENSE) + +## Important + +See LICENSE for copyright and license terms. Block.one makes its contribution on a voluntary basis as a member of the EOSIO community and is not responsible for ensuring the overall performance of the software or any related applications. We make no representation, warranty, guarantee or undertaking in respect of the software or any related documentation, whether expressed or implied, including but not limited to the warranties or merchantability, fitness for a particular purpose and noninfringement. In no event shall we be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the software or documentation or the use or other dealings in the software or documentation. Any test results or performance figures are indicative and will not reflect performance under all conditions. 
Any reference to any third party or third-party product, service or other resource is not an endorsement or recommendation by Block.one. We are not responsible, and disclaim any and all responsibility and liability, for your use of or reliance on any of these resources. Third-party resources may be updated, changed or terminated at any time, so the information here may be out of date or inaccurate. From b4f8d70b89b1309943dbaee551da32234bd9e392 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 20:28:13 -0500 Subject: [PATCH 39/94] Do not name main thread since some tests expect it to be nodeos --- programs/nodeos/main.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 403b5c2b317..7034a03858a 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -84,7 +84,6 @@ enum return_codes { int main(int argc, char** argv) { try { - fc::set_os_thread_name( "main" ); app().set_version(eosio::nodeos::config::version); auto root = fc::app_path(); From ccd6e53dc44b76c5351424d1896955574e230248 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 12:19:03 -0500 Subject: [PATCH 40/94] Attempt to make comment clearer --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 2ba100bdc84..0adb1670068 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1462,10 +1462,10 @@ void mongo_db_plugin_impl::init() { } try { - // Due to the vast amounts of data, we suggest MongoDB administrators: + // MongoDB administrators (to enable sharding) : // 1. enableSharding database (default to EOS) // 2. shardCollection: blocks, action_traces, transaction_traces, especially action_traces - // 3. Use compound index with shard key (default to _id), to improve query performance. + // 3. 
Compound index with shard key (default to _id below), to improve query performance. // blocks indexes auto blocks = mongo_conn[db_name][blocks_col]; From e019da51863d3d8dc53f46918f3e5fa9abaf10ae Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Thu, 28 Mar 2019 13:28:47 -0400 Subject: [PATCH 41/94] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 31dee1d933c..22d36d65db1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2017-2019 block.one all rights reserved. +Copyright (c) 2017-2019 block.one and its contributors. All rights reserved. The MIT License From 00e96e1711c2c397dff5ccb70dca542a450d431f Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Mar 2019 17:15:50 -0400 Subject: [PATCH 42/94] Python 36 for centos7 and amazonlinux1 (#7005) --- .buildkite/pipeline.yml | 44 +++++++++++++++++------------------ scripts/eosio_build_amazon.sh | 6 ++--- scripts/eosio_build_centos.sh | 12 +++++----- 3 files changed, 31 insertions(+), 31 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 57ce31e5a6c..f83249df044 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -17,7 +17,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -38,7 +38,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -59,7 +59,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 
@@ -80,7 +80,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 60 @@ -101,7 +101,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -122,7 +122,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 60 @@ -173,7 +173,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -193,7 +193,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -214,7 +214,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -234,7 +234,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -255,7 +255,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job 
timeout: 60 @@ -275,7 +275,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 @@ -296,7 +296,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 60 @@ -316,7 +316,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 60 @@ -337,7 +337,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -357,7 +357,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -378,7 +378,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 60 @@ -398,7 +398,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 60 @@ -501,7 +501,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: 
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job env: OS: "ubuntu-16.04" @@ -527,7 +527,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job env: OS: "ubuntu-18.04" @@ -560,7 +560,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job env: OS: "fc27" @@ -593,7 +593,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job env: OS: "el7" diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 7a16e4486e9..ff655496a7b 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -8,13 +8,13 @@ DISK_AVAIL_KB=$( df . 
| tail -1 | awk '{print $4}' ) DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) -if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then +if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 DEP_ARRAY=( sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ - bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python34 python34-devel \ + bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python36 python36-devel \ libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel ) -else +else # Amazonlinux2 DEP_ARRAY=( git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \ bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 1c1e97b2fab..8e7044001ab 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -121,7 +121,7 @@ printf "\\n" DEP_ARRAY=( git autoconf automake libtool make bzip2 doxygen graphviz \ bzip2-devel openssl-devel gmp-devel \ - ocaml libicu-devel python python-devel python33 \ + ocaml libicu-devel python python-devel rh-python36 \ gettext-devel file sudo libusbx-devel libcurl-devel ) COUNT=1 @@ -160,10 +160,10 @@ else printf " - No required YUM dependencies to install.\\n\\n" fi -if [ -d /opt/rh/python33 ]; then - printf "Enabling python33...\\n" - source /opt/rh/python33/enable || exit 1 - printf " - Python33 successfully enabled!\\n" +if [ -d /opt/rh/rh-python36 ]; then + printf "Enabling python36...\\n" + source /opt/rh/rh-python36/enable || exit 1 + printf " - Python36 successfully enabled!\\n" fi printf "\\n" @@ -190,7 +190,7 @@ if [ $? 
-ne 0 ]; then exit -1; fi printf "\\n" -export CPATH="$CPATH:/opt/rh/python33/root/usr/include/python3.3m" # m on the end causes problems with boost finding python3 +export CPATH="$CPATH:/opt/rh/rh-python36/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then From c33196dcdeb83e82ff14338bad7a52651b2c9544 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Mar 2019 20:25:48 -0400 Subject: [PATCH 43/94] long-running image version bump (#7011) --- .buildkite/long_running_tests.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 6383f57c392..dd0d6cbee9d 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -17,7 +17,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -38,7 +38,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -59,7 +59,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 @@ -80,7 +80,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: 
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 60 @@ -101,7 +101,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -122,7 +122,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 60 @@ -172,7 +172,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 90 @@ -192,7 +192,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 90 @@ -212,7 +212,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 90 @@ -232,7 +232,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 90 @@ -252,7 +252,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 90 @@ -272,7 +272,7 @@ steps: region: "us-west-2" 
docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 90 From cc0eb5b65c808ddaa444a1b4b87cb4dc1e90d2a7 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 14:20:44 -0400 Subject: [PATCH 44/94] Remove boost::thread usage from mongo plugin boost::thread is problematic on some new compiler + old boost combos --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 0adb1670068..25dea46d546 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -16,11 +16,10 @@ #include #include #include -#include -#include -#include #include +#include +#include #include #include @@ -164,9 +163,9 @@ class mongo_db_plugin_impl { std::deque block_state_process_queue; std::deque irreversible_block_state_queue; std::deque irreversible_block_state_process_queue; - boost::mutex mtx; - boost::condition_variable condition; - boost::thread consume_thread; + std::mutex mtx; + std::condition_variable condition; + std::thread consume_thread; std::atomic_bool done{false}; std::atomic_bool startup{true}; fc::optional chain_id; @@ -292,7 +291,7 @@ bool mongo_db_plugin_impl::filter_include( const transaction& trx ) const template void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { - boost::mutex::scoped_lock lock( mtx ); + std::unique_lock lock( mtx ); auto queue_size = queue.size(); if( queue_size > max_queue_size ) { lock.unlock(); @@ -300,7 +299,7 @@ void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { queue_sleep_time += 10; if( queue_sleep_time > 1000 ) wlog("queue size: ${q}", ("q", queue_size)); - boost::this_thread::sleep_for( 
boost::chrono::milliseconds( queue_sleep_time )); + std::this_thread::sleep_for( std::chrono::milliseconds( queue_sleep_time )); lock.lock(); } else { queue_sleep_time -= 10; @@ -408,7 +407,7 @@ void mongo_db_plugin_impl::consume_blocks() { _account_controls = mongo_conn[db_name][account_controls_col]; while (true) { - boost::mutex::scoped_lock lock(mtx); + std::unique_lock lock(mtx); while ( transaction_metadata_queue.empty() && transaction_trace_queue.empty() && block_state_queue.empty() && @@ -1528,7 +1527,7 @@ void mongo_db_plugin_impl::init() { ilog("starting db plugin thread"); - consume_thread = boost::thread([this] { consume_blocks(); }); + consume_thread = std::thread([this] { consume_blocks(); }); startup = false; } From e718320c7304dddb1cc417a6229ad1a61364231e Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 14:28:25 -0400 Subject: [PATCH 45/94] disable asio's experimental string_view usage on macos Newer stdlibc++s can #error in experimental string_view --- CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 00258c4b86d..694f0814aaa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -116,6 +116,11 @@ FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS locale iostreams) +# Some new stdlibc++s will #error on ; a problem for boost pre-1.69 +if( APPLE AND UNIX ) + add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) +endif() + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") From 0ce2bfbaa6e387a2986b5188ce5d9fc485f06fee Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 14:46:22 -0400 Subject: [PATCH 46/94] fc sync - Remove fc::shared_ptr & refactor logging code to not use it --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 0c348cc9af4..1f62ef7f68e 160000 --- a/libraries/fc +++ 
b/libraries/fc @@ -1 +1 @@ -Subproject commit 0c348cc9af47d71af57e6926fd64848594a78658 +Subproject commit 1f62ef7f68efdaa1240bf99b382d4785fd1afcbc From 5636b81b08a0cad8a276f0769fe6144c33015345 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 15:34:56 -0400 Subject: [PATCH 47/94] chainbase sync - Remove boost thread include and unused typedef --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index a2563660f08..eb2d0c28bc1 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit a2563660f082622ab7a18778f5b91cc91f51c0c3 +Subproject commit eb2d0c28bc1f1328e8a5fc899291336ad487b084 From f6c9d81858fb5bd3e101c0ad72476c30f348c8cd Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Fri, 29 Mar 2019 16:57:36 -0400 Subject: [PATCH 48/94] New disk space requirements (#7023) --- scripts/eosio_build.sh | 4 ++-- scripts/eosio_build_centos.sh | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index d3128903097..a97ceaa5058 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -30,9 +30,8 @@ # https://github.com/EOSIO/eos/blob/master/LICENSE ########################################################################## -VERSION=2.1 # Build script version +VERSION=2.2 # Build script version CMAKE_BUILD_TYPE=Release -export DISK_MIN=20 DOXYGEN=false ENABLE_COVERAGE_TESTING=false CORE_SYMBOL_NAME="SYS" @@ -75,6 +74,7 @@ export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm export DOXYGEN_VERSION=1_8_14 export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} export TINI_VERSION=0.18.0 +export DISK_MIN=5 # Setup directories mkdir -p $SRC_LOCATION diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 8e7044001ab..621001d0a97 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh 
@@ -160,9 +160,10 @@ else printf " - No required YUM dependencies to install.\\n\\n" fi -if [ -d /opt/rh/rh-python36 ]; then +export PYTHON3PATH="/opt/rh/rh-python36" +if [ -d $PYTHON3PATH ]; then printf "Enabling python36...\\n" - source /opt/rh/rh-python36/enable || exit 1 + source $PYTHON3PATH/enable || exit 1 printf " - Python36 successfully enabled!\\n" fi @@ -190,7 +191,7 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -export CPATH="$CPATH:/opt/rh/rh-python36/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 +export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then @@ -299,7 +300,7 @@ cd .. printf "\\n" function print_instructions() { - printf "source /opt/rh/python33/enable\\n" + printf "source ${PYTHON3PATH}/enable\\n" printf "source /opt/rh/devtoolset-7/enable\\n" return 0 } From 6692f2f39986a3d6a96ec9dddc01133e4c2f7ca4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sun, 31 Mar 2019 12:30:45 -0500 Subject: [PATCH 49/94] Report better info while trying to identify cluster sync. 
GH #7034 --- tests/Cluster.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index debfa1464cd..77012324a66 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -517,18 +517,28 @@ def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None, blockType=Bloc """Wait for all nodes to have targetBlockNum finalized.""" assert(self.nodes) - def doNodesHaveBlockNum(nodes, targetBlockNum, blockType): + def doNodesHaveBlockNum(nodes, targetBlockNum, blockType, printCount): + ret=True for node in nodes: try: if (not node.killed) and (not node.isBlockPresent(targetBlockNum, blockType=blockType)): - return False + ret=False + break except (TypeError) as _: # This can happen if client connects before server is listening - return False + ret=False + break - return True + printCount+=1 + if Utils.Debug and not ret and printCount%5==0: + blockNums=[] + for i in range(0, len(nodes)): + blockNums.append(nodes[i].getBlockNum()) + Utils.Print("Cluster still not in sync, head blocks for nodes: [ %s ]" % (", ".join(blockNums))) + return ret - lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType) + printCount=0 + lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType, printCount) ret=Utils.waitForBool(lam, timeout) return ret From 522c69a766ea4778caeea0fdffa95c5f32e4351b Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sun, 31 Mar 2019 12:33:53 -0500 Subject: [PATCH 50/94] Fix test and add verifying that the txn_test_gen_plugin is producing. 
GH #7034 --- tests/nodeos_startup_catchup.py | 43 ++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index c7f1fa80ae4..e75fe165230 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -77,10 +77,12 @@ Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) + Print("Create txn generate nodes") txnGenNodes=[] for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): txnGenNodes.append(cluster.getNode(nodeNum)) + Print("Create accounts for generated txns") txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) def lib(node): @@ -91,43 +93,78 @@ def head(node): node0=cluster.getNode(0) + Print("Wait for account creation to be irreversible") blockNum=head(node0) node0.waitForBlock(blockNum, blockType=BlockType.lib) + Print("Startup txn generation") + period=1500 + transPerPeriod=150 for genNum in range(0, len(txnGenNodes)): salt="%d" % genNum - txnGenNodes[genNum].txnGenStart(salt, 1500, 150) + txnGenNodes[genNum].txnGenStart(salt, period, transPerPeriod) time.sleep(1) blockNum=head(node0) - node0.waitForBlock(blockNum+20) - + timePerBlock=500 + blocksPerPeriod=period/timePerBlock + transactionsPerBlock=transPerPeriod/blocksPerPeriod + steadyStateWait=20 + startBlockNum=blockNum+steadyStateWait + numBlocks=20 + endBlockNum=startBlockNum+numBlocks + node0.waitForBlock(endBlockNum) + transactions=0 + avg=0 + for blockNum in range(startBlockNum, endBlockNum): + block=node0.getBlock(blockNum) + transactions+=len(block["transactions"]) + + avg=transactions / (blockNum - startBlockNum + 1) + + Print("Validate transactions are generating") + minRequiredTransactions=transactionsPerBlock + assert avg>minRequiredTransactions, "Expected to at least receive %s transactions per block, but only getting %s" % (minRequiredTransactions, avg) + + 
Print("Cycle through catchup scenarios") twoRounds=21*2*12 for catchup_num in range(0, catchupCount): + Print("Start catchup node") cluster.launchUnstarted(cachePopen=True) lastLibNum=lib(node0) + time.sleep(2) # verify producer lib is still advancing node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) catchupNode=cluster.getNodes()[-1] catchupNodeNum=cluster.getNodes().index(catchupNode) lastCatchupLibNum=lib(catchupNode) + + Print("Verify catchup node %s's LIB is advancing" % (catchupNodeNum)) # verify lib is advancing (before we wait for it to have to catchup with producer) catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + Print("Verify catchup node is advancing to producer") numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + Print("Shutdown catchup node and validate exit code") catchupNode.interruptAndVerifyExitStatus(60) + Print("Restart catchup node") catchupNode.relaunch(catchupNodeNum) lastCatchupLibNum=lib(catchupNode) + + Print("Verify catchup node is advancing") # verify catchup node is advancing to producer catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + Print("Verify producer is still advancing LIB") lastLibNum=lib(node0) # verify producer lib is still advancing node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + Print("Verify catchup node is advancing to producer") # verify catchup node is advancing to producer catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) From a560fd391ce54936345a8a204484b76b30db5acb Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 31 Mar 2019 15:48:57 -0400 Subject: [PATCH 51/94] Remove final remnants of boost thread usage from cmake Because this could be the last boost thread 
reference, we need to tell cmake to still pass thread compiler flags --- CMakeLists.txt | 8 ++++---- libraries/appbase | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 694f0814aaa..17c3df72451 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -104,16 +104,12 @@ IF( WIN32 ) set(BOOST_ALL_DYN_LINK OFF) # force dynamic linking for all libraries ENDIF(WIN32) FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS - thread date_time filesystem system program_options - serialization chrono unit_test_framework - context - locale iostreams) # Some new stdlibc++s will #error on ; a problem for boost pre-1.69 @@ -121,6 +117,10 @@ if( APPLE AND UNIX ) add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) endif() +set(THREADS_PREFER_PTHREAD_FLAG 1) +find_package(Threads) +link_libraries(Threads::Threads) + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") diff --git a/libraries/appbase b/libraries/appbase index 013246f52f1..b6b55f5ff99 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 013246f52f13a7bc129193c3a64e6cd0cea44ac0 +Subproject commit b6b55f5ff993f4be954d2aa556538636fbdaabb4 From 8ea813a6abef9e251c90a8ac68aeaa9e3a5c66cf Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 31 Mar 2019 15:50:20 -0400 Subject: [PATCH 52/94] When building boost on macos, only build the libraries needed by eosio --- scripts/eosio_build_darwin.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index e418be9a717..224b0839f1d 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -171,7 +171,8 @@ if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ && cd $BOOST_ROOT \ && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) 
install \ + && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) --with-iostreams --with-date_time --with-filesystem \ + --with-system --with-program_options --with-chrono --with-test install \ && cd .. \ && rm -f boost_$BOOST_VERSION.tar.bz2 \ && rm -rf $BOOST_LINK_LOCATION \ From 1ad1f742b62757d69ffb01d79369791581dd42d3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 15:59:30 -0500 Subject: [PATCH 53/94] Fix for close() called while async_read in-flight --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a26353ab387..9b79ae6bf70 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -826,7 +826,6 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); if( read_delay_timer ) read_delay_timer->cancel(); - pending_message_buffer.reset(); } void connection::txn_send_pending(const vector& ids) { @@ -1886,6 +1885,7 @@ namespace eosio { auto current_endpoint = *endpoint_itr; ++endpoint_itr; c->connecting = true; + c->pending_message_buffer.reset(); connection_wptr weak_conn = c; c->socket->async_connect( current_endpoint, boost::asio::bind_executor( c->strand, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { @@ -2061,7 +2061,7 @@ namespace eosio { [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); - if (!conn) { + if (!conn || !conn->connected()) { return; } From cc4d83bcd0e87179baea2a312c1686b1034e61c5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 17:36:31 -0500 Subject: [PATCH 54/94] Can't call connected(), it checks flag that is only set after first read --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9b79ae6bf70..268ca1e3359 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2061,7 +2061,7 @@ namespace eosio { [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); - if (!conn || !conn->connected()) { + if (!conn || !conn->socket || !conn->socket->is_open()) { return; } From 0f7d853a4a8bea8612b92ce35115368b8f0d79d6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:17 -0500 Subject: [PATCH 55/94] Name bnet threads --- plugins/bnet_plugin/bnet_plugin.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/bnet_plugin/bnet_plugin.cpp b/plugins/bnet_plugin/bnet_plugin.cpp index b788d833503..08d2091040f 100644 --- a/plugins/bnet_plugin/bnet_plugin.cpp +++ b/plugins/bnet_plugin/bnet_plugin.cpp @@ -51,6 +51,7 @@ #include #include +#include #include #include @@ -1398,7 +1399,13 @@ namespace eosio { my->_socket_threads.reserve( my->_num_threads ); for( auto i = 0; i < my->_num_threads; ++i ) { - my->_socket_threads.emplace_back( [&ioc]{ wlog( "start thread" ); ioc.run(); wlog( "end thread" ); } ); + my->_socket_threads.emplace_back( [&ioc, i]{ + std::string tn = "bnet-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + wlog( "start thread" ); + ioc.run(); + wlog( "end thread" ); + } ); } for( const auto& peer : my->_connect_to_peers ) { From 92ebd6a55c3d5ce4c1505b590df325d95931f1da Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:34 -0500 Subject: [PATCH 56/94] Name http threads --- plugins/http_plugin/http_plugin.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 7e205736874..fe2b31472e7 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ 
b/plugins/http_plugin/http_plugin.cpp @@ -522,7 +522,11 @@ namespace eosio { my->server_ioc = std::make_shared(); my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) ); for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { + std::string tn = "http-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc->run(); + } ); } if(my->listen_endpoint) { From 9015d3063075b5678c972a9f51da478b40f23980 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:40 -0500 Subject: [PATCH 57/94] Name mongo_db_plugin consume thread --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 25dea46d546..72a43caf418 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -1527,7 +1528,10 @@ void mongo_db_plugin_impl::init() { ilog("starting db plugin thread"); - consume_thread = std::thread([this] { consume_blocks(); }); + consume_thread = std::thread([this] { + fc::set_os_thread_name( "mongodb" ); + consume_blocks(); + }); startup = false; } From dc4026e711d307164cc59b25d7525941568b05e4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:18:27 -0500 Subject: [PATCH 58/94] Name main application thread --- programs/nodeos/main.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 7034a03858a..403b5c2b317 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -84,6 +84,7 @@ enum return_codes { int main(int argc, char** argv) { try { + fc::set_os_thread_name( "main" ); app().set_version(eosio::nodeos::config::version); auto root = 
fc::app_path(); From d6bf0b0447d30ba7b875bea2f5ad765b61fdd7b1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:20:04 -0500 Subject: [PATCH 59/94] Name net_plugin server_ioc threads --- plugins/net_plugin/net_plugin.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a26353ab387..c8e7bf20a6f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -3016,7 +3017,11 @@ namespace eosio { my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); // currently thread_pool only used for server_ioc for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { + std::string tn = "net-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc->run(); + } ); } my->resolver = std::make_shared( std::ref( *my->server_ioc )); From 5dac7047901ab5e408fd9779e353244cce861d14 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:20:39 -0500 Subject: [PATCH 60/94] Name all threads in chain controller thread pool --- libraries/chain/controller.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f3b0a841981..9ee8626a77f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -1727,7 +1728,25 @@ void controller::add_indices() { my->add_indices(); } +void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { + std::string tn = "chain-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ++i; + if( i < sz ) { + // post recursively so we consume all the threads + auto 
fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { + set_thread_name( tp, i, sz ); + }); + fut.wait(); + } +} + void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { + // name threads in thread pool for logger + boost::asio::post( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { + set_thread_name( tp, 0, sz ); + }); + my->head = my->fork_db.head(); if( snapshot ) { ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); From c2a07d274c476aa1f4fdbde57d846e0c70bc74f4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:21:16 -0500 Subject: [PATCH 61/94] Name all threads in producer thread pool --- plugins/producer_plugin/producer_plugin.cpp | 24 +++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a35fa34a9c5..0f9fc79ccea 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -8,9 +8,11 @@ #include #include #include +#include #include #include +#include #include #include @@ -620,6 +622,19 @@ make_keosd_signature_provider(const std::shared_ptr& impl, }; } +void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { + std::string tn = "prod-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ++i; + if( i < sz ) { + // post recursively so we consume all the threads + auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { + set_thread_name( tp, i, sz ); + }); + fut.wait(); + } +} + void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { my->chain_plug = app().find_plugin(); @@ -690,6 +705,11 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); my->_thread_pool.emplace( 
thread_pool_size ); + // name threads in thread pool for logger + boost::asio::post( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { + set_thread_name( tp, 0, sz ); + }); + if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); if( sd.is_relative()) { @@ -738,10 +758,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - handle_sighup(); // Sets loggers - ilog("producer plugin: plugin_startup() begin"); + handle_sighup(); // Sets loggers + chain::controller& chain = my->chain_plug->chain(); EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() == chain::db_read_mode::SPECULATIVE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is not \"speculative\"" ); From e0ecb3f398166f7f7e6962735ca8f7739fff7c7a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:49:03 -0500 Subject: [PATCH 62/94] Revert move of ilog message --- plugins/producer_plugin/producer_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0f9fc79ccea..0754d1248c1 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -758,10 +758,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - ilog("producer plugin: plugin_startup() begin"); - handle_sighup(); // Sets loggers + ilog("producer plugin: plugin_startup() begin"); + chain::controller& chain = my->chain_plug->chain(); EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() == chain::db_read_mode::SPECULATIVE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is not \"speculative\"" ); 
From 34a43e20996a726fec0995f6f05558ead7c9d9f9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 12:10:59 -0500 Subject: [PATCH 63/94] Fix for tests which were destroying controller before all set_thread_name finished causing deadlock. --- libraries/chain/controller.cpp | 3 ++- plugins/producer_plugin/producer_plugin.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9ee8626a77f..fc71d06a42a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1743,9 +1743,10 @@ void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { // name threads in thread pool for logger - boost::asio::post( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { + auto fut = eosio::chain::async_thread_pool( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { set_thread_name( tp, 0, sz ); }); + fut.wait(); my->head = my->fork_db.head(); if( snapshot ) { diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0754d1248c1..84fb3866012 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -706,9 +706,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_thread_pool.emplace( thread_pool_size ); // name threads in thread pool for logger - boost::asio::post( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { + auto fut = eosio::chain::async_thread_pool( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { set_thread_name( tp, 0, sz ); }); + fut.wait(); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); From 79f507a1b9292020292884fd2bb2c60cabc11a68 Mon Sep 17 00:00:00 2001 From: 
Kevin Heifner Date: Tue, 26 Mar 2019 20:28:13 -0500 Subject: [PATCH 64/94] Do not name main thread since some tests expect it to be nodeos --- programs/nodeos/main.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 403b5c2b317..7034a03858a 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -84,7 +84,6 @@ enum return_codes { int main(int argc, char** argv) { try { - fc::set_os_thread_name( "main" ); app().set_version(eosio::nodeos::config::version); auto root = fc::app_path(); From 7b3f8014bd02dc2209c312f7cedacdec291ed362 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 10:55:06 -0400 Subject: [PATCH 65/94] Update to lastest fc with set_os_thread_name --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 1f62ef7f68e..809c8b7434e 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 1f62ef7f68efdaa1240bf99b382d4785fd1afcbc +Subproject commit 809c8b7434e6797efa8dd1bfba546b551e4d830e From f0e42dae564c523daa89041cff52d1bdf152c73c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 14:41:24 -0400 Subject: [PATCH 66/94] Use io_context in thread_pool and set thread name before run --- libraries/chain/controller.cpp | 33 ++++++++----------- .../chain/include/eosio/chain/controller.hpp | 2 +- plugins/chain_plugin/chain_plugin.cpp | 2 -- 3 files changed, 14 insertions(+), 23 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index fc71d06a42a..b697f5238b6 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -134,6 +134,7 @@ struct controller_impl { bool trusted_producer_light_validation = false; uint32_t snapshot_head_block = 0; boost::asio::thread_pool thread_pool; + boost::asio::io_context ioc; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -404,6 +405,9 @@ struct controller_impl { } 
~controller_impl() { + ioc.stop(); + thread_pool.stop(); + thread_pool.join(); pending.reset(); } @@ -1728,25 +1732,14 @@ void controller::add_indices() { my->add_indices(); } -void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { - std::string tn = "chain-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ++i; - if( i < sz ) { - // post recursively so we consume all the threads - auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { - set_thread_name( tp, i, sz ); - }); - fut.wait(); - } -} - void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { - // name threads in thread pool for logger - auto fut = eosio::chain::async_thread_pool( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { - set_thread_name( tp, 0, sz ); - }); - fut.wait(); + for( uint16_t i = 0; i < my->conf.thread_pool_size; ++i ) { + boost::asio::post( my->ioc, [&ioc = my->ioc, i]() { + std::string tn = "chain-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc.run(); + } ); + } my->head = my->fork_db.head(); if( snapshot ) { @@ -1799,8 +1792,8 @@ void controller::abort_block() { my->abort_block(); } -boost::asio::thread_pool& controller::get_thread_pool() { - return my->thread_pool; +boost::asio::io_context& controller::get_thread_pool() { + return my->ioc; } std::future controller::create_block_state_future( const signed_block_ptr& b ) { diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 2aab3179668..9249a5cf226 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -140,7 +140,7 @@ namespace eosio { namespace chain { std::future create_block_state_future( const signed_block_ptr& b ); void push_block( std::future& block_state_future ); - boost::asio::thread_pool& get_thread_pool(); + boost::asio::io_context& get_thread_pool(); const 
chainbase::database& db()const; diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index dbff9e03cbc..2b8b493392e 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -770,8 +770,6 @@ void chain_plugin::plugin_shutdown() { my->irreversible_block_connection.reset(); my->accepted_transaction_connection.reset(); my->applied_transaction_connection.reset(); - my->chain->get_thread_pool().stop(); - my->chain->get_thread_pool().join(); my->chain.reset(); } From b347269cb5086f9f1e72f3605a848011377828fa Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:19:42 -0400 Subject: [PATCH 67/94] Use io_context for thread pool and name threads before run --- plugins/producer_plugin/producer_plugin.cpp | 35 +++++++++------------ 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 84fb3866012..ca50c024d01 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -62,6 +62,7 @@ static appbase::abstract_plugin& _producer_plugin = app().register_plugin; namespace { bool failure_is_subjective(const fc::exception& e, bool deadline_is_subjective) { @@ -135,6 +136,8 @@ class producer_plugin_impl : public std::enable_shared_from_this _thread_pool; + boost::asio::io_context _ioc; + fc::optional _ioc_work; int32_t _max_transaction_time_ms; fc::microseconds _max_irreversible_block_age_us; @@ -353,8 +356,8 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { + 
transaction_metadata::create_signing_keys_future( trx, _ioc, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); + boost::asio::post( _ioc, [self = this, trx, persist_until_expired, next]() { if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { @@ -622,19 +625,6 @@ make_keosd_signature_provider(const std::shared_ptr& impl, }; } -void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { - std::string tn = "prod-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ++i; - if( i < sz ) { - // post recursively so we consume all the threads - auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { - set_thread_name( tp, i, sz ); - }); - fut.wait(); - } -} - void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { my->chain_plug = app().find_plugin(); @@ -705,11 +695,14 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); my->_thread_pool.emplace( thread_pool_size ); - // name threads in thread pool for logger - auto fut = eosio::chain::async_thread_pool( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { - set_thread_name( tp, 0, sz ); - }); - fut.wait(); + my->_ioc_work.emplace( boost::asio::make_work_guard( my->_ioc ) ); + for( uint16_t i = 0; i < thread_pool_size; ++i ) { + boost::asio::post( *my->_thread_pool, [&ioc = my->_ioc, i]() { + std::string tn = "prod-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc.run(); + } ); + } if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); @@ -805,6 +798,8 @@ void producer_plugin::plugin_shutdown() { edump((e.to_detail_string())); } + my->_ioc_work.reset(); + my->_ioc.stop(); if( my->_thread_pool ) { my->_thread_pool->join(); my->_thread_pool->stop(); From 
85bbc90d1d7a64ce355722aaac391b19ffb43b2f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:20:27 -0400 Subject: [PATCH 68/94] Use ioc work to prevent io_context::run from exiting --- libraries/chain/controller.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index b697f5238b6..2bf1015f8d9 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -28,6 +28,7 @@ namespace eosio { namespace chain { using resource_limits::resource_limits_manager; +using ioc_work_t = boost::asio::executor_work_guard; using controller_index_set = index_set< account_index, @@ -135,6 +136,7 @@ struct controller_impl { uint32_t snapshot_head_block = 0; boost::asio::thread_pool thread_pool; boost::asio::io_context ioc; + fc::optional ioc_work; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -405,6 +407,7 @@ struct controller_impl { } ~controller_impl() { + ioc_work.reset(); ioc.stop(); thread_pool.stop(); thread_pool.join(); @@ -1199,7 +1202,7 @@ struct controller_impl { auto& pt = receipt.trx.get(); auto mtrx = std::make_shared( std::make_shared( pt ) ); if( !self.skip_auth_check() ) { - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, chain_id, microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx, ioc, chain_id, microseconds::maximum() ); } packed_transactions.emplace_back( std::move( mtrx ) ); } @@ -1277,7 +1280,7 @@ struct controller_impl { auto prev = fork_db.get_block( b->previous ); EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return async_thread_pool( thread_pool, [b, prev]() { + return async_thread_pool( ioc, [b, prev]() { const bool skip_validate_signee = false; return std::make_shared( *prev, move( b ), skip_validate_signee ); } ); @@ -1733,8 +1736,9 @@ void controller::add_indices() { } void 
controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { + my->ioc_work.emplace( boost::asio::make_work_guard( my->ioc ) ); for( uint16_t i = 0; i < my->conf.thread_pool_size; ++i ) { - boost::asio::post( my->ioc, [&ioc = my->ioc, i]() { + boost::asio::post( my->thread_pool, [&ioc = my->ioc, i]() { std::string tn = "chain-" + std::to_string( i ); fc::set_os_thread_name( tn ); ioc.run(); From d1b6eb93ee0e3711e25f58adbb6f98e799ef1856 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:21:04 -0400 Subject: [PATCH 69/94] Use io_context instead of thread_pool --- libraries/chain/include/eosio/chain/thread_utils.hpp | 2 +- libraries/chain/include/eosio/chain/transaction_metadata.hpp | 2 +- libraries/chain/transaction_metadata.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp index 31b32cbd91f..bf5932fdf0f 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -13,7 +13,7 @@ namespace eosio { namespace chain { // async on thread_pool and return future template - auto async_thread_pool( boost::asio::thread_pool& thread_pool, F&& f ) { + auto async_thread_pool( boost::asio::io_context& thread_pool, F&& f ) { auto task = std::make_shared>( std::forward( f ) ); boost::asio::post( thread_pool, [task]() { (*task)(); } ); return task->get_future(); diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 6136580fa44..ce7189204cb 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -52,7 +52,7 @@ class transaction_metadata { const flat_set& recover_keys( const chain_id_type& chain_id ); - static void create_signing_keys_future( const transaction_metadata_ptr& 
mtrx, boost::asio::thread_pool& thread_pool, + static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::io_context& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ); }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index 482b3c488f7..9935270b037 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -24,7 +24,7 @@ const flat_set& transaction_metadata::recover_keys( const chain } void transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { + boost::asio::io_context& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created return; From d955af19f06cba7e10c31178d17c71e291dcbc44 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:21:37 -0400 Subject: [PATCH 70/94] Update test to run on io_context like in producer_plugin and controller --- unittests/misc_tests.cpp | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index bfaeca76727..0b988452ac2 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -829,20 +830,30 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(trx.id(), mtrx->id); BOOST_CHECK_EQUAL(trx.id(), mtrx2->id); - boost::asio::thread_pool thread_pool(5); + using ioc_work_t = boost::asio::executor_work_guard; + const int num_threads = 5; + boost::asio::thread_pool thread_pool( num_threads ); + boost::asio::io_context ioc; + fc::optional ioc_work( boost::asio::make_work_guard( ioc ) ); + for( int i = 0; i < num_threads; ++i) { + boost::asio::post( 
thread_pool, [&ioc]() { + fc::set_os_thread_name( "misc_test" ); + ioc.run(); + } ); + } BOOST_CHECK( !mtrx->signing_keys_future.valid() ); BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx2, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); BOOST_CHECK( mtrx->signing_keys_future.valid() ); BOOST_CHECK( mtrx2->signing_keys_future.valid() ); // no-op - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx2, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); auto keys = mtrx->recover_keys( test.control->get_chain_id() ); BOOST_CHECK_EQUAL(1u, keys.size()); @@ -857,6 +868,10 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(1u, keys.size()); BOOST_CHECK_EQUAL(public_key, *keys.begin()); + ioc_work.reset(); + ioc.stop(); + thread_pool.stop(); + thread_pool.join(); } FC_LOG_AND_RETHROW() } From fe396671fa9e17d1918a3361c57ade0fa9b46538 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 16:54:49 -0500 Subject: [PATCH 71/94] Use shared_future instead of future since accessed across threads --- .../include/eosio/chain/transaction_metadata.hpp | 11 +++++++---- 
libraries/chain/transaction_metadata.cpp | 9 ++++++--- plugins/producer_plugin/producer_plugin.cpp | 10 ++++++---- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 6136580fa44..923e5d42f14 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -15,6 +15,7 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; +using signing_keys_future_type = std::shared_future>>; /** * This data structure should store context-free cached data about a transaction such as * packed/unpacked/compressed and recovered keys @@ -26,8 +27,7 @@ class transaction_metadata { packed_transaction_ptr packed_trx; fc::microseconds sig_cpu_usage; optional>> signing_keys; - std::future>> - signing_keys_future; + signing_keys_future_type signing_keys_future; bool accepted = false; bool implicit = false; bool scheduled = false; @@ -52,8 +52,11 @@ class transaction_metadata { const flat_set& recover_keys( const chain_id_type& chain_id ); - static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + // must be called from main application thread + // signing_keys_future should only be accessed by main application thread + static signing_keys_future_type + create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, fc::microseconds time_limit ); }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index 482b3c488f7..cbeda6cbec5 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -23,10 +23,11 @@ const flat_set& 
transaction_metadata::recover_keys( const chain return signing_keys->second; } -void transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { +signing_keys_future_type transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, + boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) +{ if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created - return; + return mtrx->signing_keys_future; std::weak_ptr mtrx_wp = mtrx; mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { @@ -41,6 +42,8 @@ void transaction_metadata::create_signing_keys_future( const transaction_metadat } return std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys )); } ); + + return mtrx->signing_keys_future; } diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a35fa34a9c5..28714d31597 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -351,10 +351,12 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { - if( trx->signing_keys_future.valid() ) - trx->signing_keys_future.wait(); + signing_keys_future_type future = + transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), + fc::microseconds( cfg.max_transaction_cpu_usage ) ); + boost::asio::post( *_thread_pool, [self = this, future, trx, 
persist_until_expired, next]() { + if( future.valid() ) + future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { self->process_incoming_transaction_async( trx, persist_until_expired, next ); }); From 5b5a0a7d6940251d686ee8debca7b9dfd5b58ae8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 30 Mar 2019 08:53:58 -0500 Subject: [PATCH 72/94] Simplify key recovery future logic --- libraries/chain/controller.cpp | 7 ++-- .../eosio/chain/transaction_metadata.hpp | 15 +++++---- libraries/chain/transaction_metadata.cpp | 33 +++++++++---------- libraries/testing/tester.cpp | 15 +++++++-- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 4 +-- plugins/producer_plugin/producer_plugin.cpp | 5 ++- unittests/misc_tests.cpp | 24 +++++++------- 7 files changed, 56 insertions(+), 47 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f3b0a841981..63f5e740229 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -994,9 +994,10 @@ struct controller_impl { auto start = fc::time_point::now(); const bool check_auth = !self.skip_auth_check() && !trx->implicit; // call recover keys so that trx->sig_cpu_usage is set correctly - const flat_set& recovered_keys = check_auth ? trx->recover_keys( chain_id ) : flat_set(); + const fc::microseconds sig_cpu_usage = check_auth ? std::get<0>( trx->recover_keys( chain_id ) ) : fc::microseconds(); + const flat_set& recovered_keys = check_auth ? 
std::get<1>( trx->recover_keys( chain_id ) ) : flat_set(); if( !explicit_billed_cpu_time ) { - fc::microseconds already_consumed_time( EOS_PERCENT(trx->sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); + fc::microseconds already_consumed_time( EOS_PERCENT(sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); if( start.time_since_epoch() < already_consumed_time ) { start = fc::time_point(); @@ -1194,7 +1195,7 @@ struct controller_impl { auto& pt = receipt.trx.get(); auto mtrx = std::make_shared( std::make_shared( pt ) ); if( !self.skip_auth_check() ) { - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, chain_id, microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool, chain_id, microseconds::maximum() ); } packed_transactions.emplace_back( std::move( mtrx ) ); } diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 923e5d42f14..9d0c01e0a8c 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -16,6 +16,8 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; using signing_keys_future_type = std::shared_future>>; +using recovery_keys_type = std::pair&>; + /** * This data structure should store context-free cached data about a transaction such as * packed/unpacked/compressed and recovered keys @@ -25,8 +27,6 @@ class transaction_metadata { transaction_id_type id; transaction_id_type signed_id; packed_transaction_ptr packed_trx; - fc::microseconds sig_cpu_usage; - optional>> signing_keys; signing_keys_future_type signing_keys_future; bool accepted = false; bool implicit = false; @@ -50,13 +50,14 @@ class transaction_metadata { signed_id = digest_type::hash(*packed_trx); } - const flat_set& recover_keys( const chain_id_type& chain_id ); - // must be called from main application thread - 
// signing_keys_future should only be accessed by main application thread static signing_keys_future_type - create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + start_recover_keys( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, fc::microseconds time_limit ); + + // start_recover_keys must be called first + recovery_keys_type recover_keys( const chain_id_type& chain_id ); + }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index cbeda6cbec5..ded655c8d79 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -4,35 +4,32 @@ namespace eosio { namespace chain { - -const flat_set& transaction_metadata::recover_keys( const chain_id_type& chain_id ) { +recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chain_id ) { // Unlikely for more than one chain_id to be used in one nodeos instance - if( !signing_keys || signing_keys->first != chain_id ) { - if( signing_keys_future.valid() ) { - std::tuple> sig_keys = signing_keys_future.get(); - if( std::get<0>( sig_keys ) == chain_id ) { - sig_cpu_usage = std::get<1>( sig_keys ); - signing_keys.emplace( std::get<0>( sig_keys ), std::move( std::get<2>( sig_keys ))); - return signing_keys->second; - } + if( signing_keys_future.valid() ) { + const std::tuple>& sig_keys = signing_keys_future.get(); + if( std::get<0>( sig_keys ) == chain_id ) { + return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); } - flat_set recovered_pub_keys; - sig_cpu_usage = packed_trx->get_signed_transaction().get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys ); - signing_keys.emplace( chain_id, std::move( recovered_pub_keys )); + EOS_ASSERT( false, chain_id_type_exception, "chain id ${cid} does not 
match start_recover_keys ${sid}", + ("cid", chain_id)( "sid", std::get<0>( sig_keys ) ) ); } - return signing_keys->second; + + EOS_ASSERT( false, chain_id_type_exception, "start_recover_keys for ${cid} is required", ("cid", chain_id) ); } -signing_keys_future_type transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) +signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx, + boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, + fc::microseconds time_limit ) { - if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created + if( mtrx->signing_keys_future.valid() && std::get<0>( mtrx->signing_keys_future.get() ) == chain_id ) // already created return mtrx->signing_keys_future; std::weak_ptr mtrx_wp = mtrx; mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { fc::time_point deadline = time_limit == fc::microseconds::maximum() ? - fc::time_point::maximum() : fc::time_point::now() + time_limit; + fc::time_point::maximum() : fc::time_point::now() + time_limit; auto mtrx = mtrx_wp.lock(); fc::microseconds cpu_usage; flat_set recovered_pub_keys; diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index a6a77ff2998..63a0788931f 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -346,7 +346,13 @@ namespace eosio { namespace testing { { try { if( !control->pending_block_state() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); - auto r = control->push_transaction( std::make_shared(std::make_shared(trx)), deadline, billed_cpu_time_us ); + + auto mtrx = std::make_shared( std::make_shared(trx) ); + auto time_limit = deadline == fc::time_point::maximum() ? 
+ fc::microseconds::maximum() : + fc::microseconds( deadline - fc::time_point::now() ); + transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except ) throw *r->except; return r; @@ -365,7 +371,12 @@ namespace eosio { namespace testing { c = packed_transaction::zlib; } - auto r = control->push_transaction( std::make_shared(trx,c), deadline, billed_cpu_time_us ); + auto time_limit = deadline == fc::time_point::maximum() ? + fc::microseconds::maximum() : + fc::microseconds( deadline - fc::time_point::now() ); + auto mtrx = std::make_shared(trx, c); + transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; return r; diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 25dea46d546..e8148850a5a 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -780,8 +780,8 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti } string signing_keys_json; - if( t->signing_keys.valid() ) { - signing_keys_json = fc::json::to_string( t->signing_keys->second ); + if( t->signing_keys_future.valid() ) { + signing_keys_json = fc::json::to_string( std::get<2>( t->signing_keys_future.get() ) ); } else { flat_set keys; trx.get_signature_keys( *chain_id, fc::time_point::maximum(), keys, false ); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 28714d31597..05c909c55df 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ 
b/plugins/producer_plugin/producer_plugin.cpp @@ -351,9 +351,8 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - signing_keys_future_type future = - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), - fc::microseconds( cfg.max_transaction_cpu_usage ) ); + signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, *_thread_pool, + chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); boost::asio::post( *_thread_pool, [self = this, future, trx, persist_until_expired, next]() { if( future.valid() ) future.wait(); diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index bfaeca76727..847ced59872 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -834,28 +834,28 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK( !mtrx->signing_keys_future.valid() ); BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); BOOST_CHECK( mtrx->signing_keys_future.valid() ); BOOST_CHECK( mtrx2->signing_keys_future.valid() ); // no-op - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + 
transaction_metadata::start_recover_keys( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); auto keys = mtrx->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys.size()); - BOOST_CHECK_EQUAL(public_key, *keys.begin()); + BOOST_CHECK_EQUAL(1u, keys.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys.second.begin()); // again - keys = mtrx->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys.size()); - BOOST_CHECK_EQUAL(public_key, *keys.begin()); + auto keys2 = mtrx->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys2.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys2.second.begin()); - auto keys2 = mtrx2->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys.size()); - BOOST_CHECK_EQUAL(public_key, *keys.begin()); + auto keys3 = mtrx2->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys3.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys3.second.begin()); } FC_LOG_AND_RETHROW() } From 3a2d2a2e3c8a63ab5949b8bedf6232104bd8948c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 09:25:02 -0400 Subject: [PATCH 73/94] Calculate recovery keys instead of asserting if start not called or different chain_id. Restores old behavior. 
--- .../include/eosio/chain/transaction_metadata.hpp | 3 ++- libraries/chain/transaction_metadata.cpp | 13 ++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 9d0c01e0a8c..0847159e6de 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -15,7 +15,8 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; -using signing_keys_future_type = std::shared_future>>; +using signing_keys_future_value_type = std::tuple>; +using signing_keys_future_type = std::shared_future; using recovery_keys_type = std::pair&>; /** diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index ded655c8d79..9c33121a5a6 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -11,11 +11,18 @@ recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chai if( std::get<0>( sig_keys ) == chain_id ) { return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); } - EOS_ASSERT( false, chain_id_type_exception, "chain id ${cid} does not match start_recover_keys ${sid}", - ("cid", chain_id)( "sid", std::get<0>( sig_keys ) ) ); } - EOS_ASSERT( false, chain_id_type_exception, "start_recover_keys for ${cid} is required", ("cid", chain_id) ); + // shared_keys_future not created or different chain_id + std::promise p; + flat_set recovered_pub_keys; + const signed_transaction& trn = packed_trx->get_signed_transaction(); + fc::microseconds cpu_usage = trn.get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys ); + p.set_value( std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys ) ) ); + signing_keys_future = p.get_future().share(); + + 
const std::tuple>& sig_keys = signing_keys_future.get(); + return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); } signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx, From bdce0ded8c869bc87b05b09a18c887c2adf4fb93 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 14:57:54 -0400 Subject: [PATCH 74/94] Add test for recover_keys without start_recover_keys --- unittests/misc_tests.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 847ced59872..f8a49d71cb3 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -857,6 +857,17 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(1u, keys3.second.size()); BOOST_CHECK_EQUAL(public_key, *keys3.second.begin()); + // recover keys without first calling start_recover_keys + transaction_metadata_ptr mtrx4 = std::make_shared( std::make_shared( trx, packed_transaction::none) ); + transaction_metadata_ptr mtrx5 = std::make_shared( std::make_shared( trx, packed_transaction::zlib) ); + + auto keys4 = mtrx4->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys4.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys4.second.begin()); + + auto keys5 = mtrx5->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys5.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys5.second.begin()); } FC_LOG_AND_RETHROW() } From 0d7ec43a61b4f0c95e54868de72fb7cade5a770d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 11:04:35 -0400 Subject: [PATCH 75/94] Fix merge issue --- plugins/producer_plugin/producer_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 599a3ec2cf4..37115f17c4a 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ 
b/plugins/producer_plugin/producer_plugin.cpp @@ -358,7 +358,7 @@ class producer_plugin_impl : public std::enable_shared_from_this Date: Tue, 2 Apr 2019 11:58:12 -0400 Subject: [PATCH 76/94] Add named_thread_pool to reduce duplicated code --- libraries/chain/CMakeLists.txt | 4 +- libraries/chain/controller.cpp | 25 +++--------- .../include/eosio/chain/thread_utils.hpp | 29 ++++++++++++++ libraries/chain/thread_utils.cpp | 40 +++++++++++++++++++ plugins/producer_plugin/producer_plugin.cpp | 23 ++--------- unittests/misc_tests.cpp | 25 +++--------- 6 files changed, 86 insertions(+), 60 deletions(-) create mode 100644 libraries/chain/thread_utils.cpp diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 2c430fecea0..f2bc4806b30 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -44,9 +44,9 @@ add_library( eosio_chain # global_property_object.cpp # # contracts/chain_initializer.cpp - - + transaction_metadata.cpp + thread_utils.cpp ${HEADERS} ) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 176e743d32c..ac2fddbf159 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -28,7 +28,6 @@ namespace eosio { namespace chain { using resource_limits::resource_limits_manager; -using ioc_work_t = boost::asio::executor_work_guard; using controller_index_set = index_set< account_index, @@ -134,9 +133,7 @@ struct controller_impl { optional subjective_cpu_leeway; bool trusted_producer_light_validation = false; uint32_t snapshot_head_block = 0; - boost::asio::thread_pool thread_pool; - boost::asio::io_context ioc; - fc::optional ioc_work; + named_thread_pool thread_pool; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -188,7 +185,7 @@ struct controller_impl { conf( cfg ), chain_id( cfg.genesis.compute_chain_id() ), read_mode( cfg.read_mode ), - thread_pool( cfg.thread_pool_size ) + thread_pool( "chain", cfg.thread_pool_size ) { #define 
SET_APP_HANDLER( receiver, contract, action) \ @@ -407,10 +404,7 @@ struct controller_impl { } ~controller_impl() { - ioc_work.reset(); - ioc.stop(); thread_pool.stop(); - thread_pool.join(); pending.reset(); } @@ -1203,7 +1197,7 @@ struct controller_impl { auto& pt = receipt.trx.get(); auto mtrx = std::make_shared( std::make_shared( pt ) ); if( !self.skip_auth_check() ) { - transaction_metadata::start_recover_keys( mtrx, ioc, chain_id, microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), chain_id, microseconds::maximum() ); } packed_transactions.emplace_back( std::move( mtrx ) ); } @@ -1281,7 +1275,7 @@ struct controller_impl { auto prev = fork_db.get_block( b->previous ); EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return async_thread_pool( ioc, [b, prev]() { + return async_thread_pool( thread_pool.get_executor(), [b, prev]() { const bool skip_validate_signee = false; return std::make_shared( *prev, move( b ), skip_validate_signee ); } ); @@ -1737,15 +1731,6 @@ void controller::add_indices() { } void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { - my->ioc_work.emplace( boost::asio::make_work_guard( my->ioc ) ); - for( uint16_t i = 0; i < my->conf.thread_pool_size; ++i ) { - boost::asio::post( my->thread_pool, [&ioc = my->ioc, i]() { - std::string tn = "chain-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ioc.run(); - } ); - } - my->head = my->fork_db.head(); if( snapshot ) { ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); @@ -1798,7 +1783,7 @@ void controller::abort_block() { } boost::asio::io_context& controller::get_thread_pool() { - return my->ioc; + return my->thread_pool.get_executor(); } std::future controller::create_block_state_future( const signed_block_ptr& b ) { diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp 
b/libraries/chain/include/eosio/chain/thread_utils.hpp index bf5932fdf0f..b3aea3085f5 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -4,6 +4,8 @@ */ #pragma once +#include +#include #include #include #include @@ -11,6 +13,33 @@ namespace eosio { namespace chain { + /** + * Wrapper class for boost asio thread pool and io_context run. + * Also names threads so that tools like htop can see thread name. + */ + class named_thread_pool { + public: + // name_prefix is name appended with -## of thread. + // short name_prefix (6 chars or under) is recommended as console_appender uses 9 chars for thread name + named_thread_pool( std::string name_prefix, size_t num_threads ); + + // calls stop() + ~named_thread_pool(); + + boost::asio::io_context& get_executor() { return _ioc; } + + // destroy work guard, stop io_context, join thread_pool, and stop thread_pool + void stop(); + + private: + using ioc_work_t = boost::asio::executor_work_guard; + + boost::asio::thread_pool _thread_pool; + boost::asio::io_context _ioc; + fc::optional _ioc_work; + }; + + // async on thread_pool and return future template auto async_thread_pool( boost::asio::io_context& thread_pool, F&& f ) { diff --git a/libraries/chain/thread_utils.cpp b/libraries/chain/thread_utils.cpp new file mode 100644 index 00000000000..1d8a2707c14 --- /dev/null +++ b/libraries/chain/thread_utils.cpp @@ -0,0 +1,40 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ + +#include +#include + +namespace eosio { namespace chain { + + +// +// named_thread_pool +// +named_thread_pool::named_thread_pool( std::string name_prefix, size_t num_threads ) +: _thread_pool( num_threads ) +{ + _ioc_work.emplace( boost::asio::make_work_guard( _ioc ) ); + for( size_t i = 0; i < num_threads; ++i ) { + boost::asio::post( _thread_pool, [&ioc = _ioc, name_prefix, i]() { + std::string tn = name_prefix + "-" + std::to_string( i ); + fc::set_os_thread_name( 
tn ); + ioc.run(); + } ); + } +} + +named_thread_pool::~named_thread_pool() { + stop(); +} + +void named_thread_pool::stop() { + _ioc_work.reset(); + _ioc.stop(); + _thread_pool.join(); + _thread_pool.stop(); +} + + +} } // eosio::chain \ No newline at end of file diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 37115f17c4a..67a90d0887c 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -62,7 +62,6 @@ static appbase::abstract_plugin& _producer_plugin = app().register_plugin; namespace { bool failure_is_subjective(const fc::exception& e, bool deadline_is_subjective) { @@ -135,9 +134,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _producer_watermarks; pending_block_mode _pending_block_mode; transaction_id_with_expiry_index _persistent_transactions; - fc::optional _thread_pool; - boost::asio::io_context _ioc; - fc::optional _ioc_work; + fc::optional _thread_pool; int32_t _max_transaction_time_ms; fc::microseconds _max_irreversible_block_age_us; @@ -356,9 +353,9 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, _ioc, + signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, _thread_pool->get_executor(), chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( _ioc, [self = this, future, trx, persist_until_expired, next]() { + boost::asio::post( _thread_pool->get_executor(), [self = this, future, trx, persist_until_expired, next]() { if( future.valid() ) future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { @@ -694,16 +691,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ auto 
thread_pool_size = options.at( "producer-threads" ).as(); EOS_ASSERT( thread_pool_size > 0, plugin_config_exception, "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); - my->_thread_pool.emplace( thread_pool_size ); - - my->_ioc_work.emplace( boost::asio::make_work_guard( my->_ioc ) ); - for( uint16_t i = 0; i < thread_pool_size; ++i ) { - boost::asio::post( *my->_thread_pool, [&ioc = my->_ioc, i]() { - std::string tn = "prod-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ioc.run(); - } ); - } + my->_thread_pool.emplace( "prod", thread_pool_size ); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); @@ -799,10 +787,7 @@ void producer_plugin::plugin_shutdown() { edump((e.to_detail_string())); } - my->_ioc_work.reset(); - my->_ioc.stop(); if( my->_thread_pool ) { - my->_thread_pool->join(); my->_thread_pool->stop(); } my->_accepted_block_connection.reset(); diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 48bfb9ed229..611d9f1f40e 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -7,13 +7,13 @@ #include #include #include +#include #include #include #include #include -#include #include #ifdef NON_VALIDATING_TEST @@ -830,30 +830,20 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(trx.id(), mtrx->id); BOOST_CHECK_EQUAL(trx.id(), mtrx2->id); - using ioc_work_t = boost::asio::executor_work_guard; - const int num_threads = 5; - boost::asio::thread_pool thread_pool( num_threads ); - boost::asio::io_context ioc; - fc::optional ioc_work( boost::asio::make_work_guard( ioc ) ); - for( int i = 0; i < num_threads; ++i) { - boost::asio::post( thread_pool, [&ioc]() { - fc::set_os_thread_name( "misc_test" ); - ioc.run(); - } ); - } + named_thread_pool thread_pool( "misc", 5 ); BOOST_CHECK( !mtrx->signing_keys_future.valid() ); BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - transaction_metadata::start_recover_keys( mtrx, ioc, 
test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::start_recover_keys( mtrx2, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); BOOST_CHECK( mtrx->signing_keys_future.valid() ); BOOST_CHECK( mtrx2->signing_keys_future.valid() ); // no-op - transaction_metadata::start_recover_keys( mtrx, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::start_recover_keys( mtrx2, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); auto keys = mtrx->recover_keys( test.control->get_chain_id() ); BOOST_CHECK_EQUAL(1u, keys.second.size()); @@ -880,10 +870,7 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(1u, keys5.second.size()); BOOST_CHECK_EQUAL(public_key, *keys5.second.begin()); - ioc_work.reset(); - ioc.stop(); thread_pool.stop(); - thread_pool.join(); } FC_LOG_AND_RETHROW() } From cb224ed1cb9daba38a0d0ff3a3f010abefb5b617 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 14:04:52 -0400 Subject: [PATCH 77/94] Simplify by using named_thread_pool --- plugins/http_plugin/http_plugin.cpp | 36 ++++++++--------------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index fe2b31472e7..befa9686287 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -5,6 +5,7 @@ 
#include #include #include +#include #include #include @@ -123,7 +124,6 @@ namespace eosio { using websocket_local_server_type = websocketpp::server; using websocket_server_tls_type = websocketpp::server>; using ssl_context_ptr = websocketpp::lib::shared_ptr; - using io_work_t = boost::asio::executor_work_guard; static bool verbose_http_errors = false; @@ -140,9 +140,7 @@ namespace eosio { websocket_server_type server; uint16_t thread_pool_size = 2; - optional thread_pool; - std::shared_ptr server_ioc; - optional server_ioc_work; + optional thread_pool; std::atomic bytes_in_flight{0}; size_t max_bytes_in_flight = 0; @@ -301,12 +299,12 @@ namespace eosio { con->defer_http_response(); bytes_in_flight += body.size(); app().post( appbase::priority::low, - [ioc = this->server_ioc, &bytes_in_flight = this->bytes_in_flight, handler_itr, + [&ioc = thread_pool->get_executor(), &bytes_in_flight = this->bytes_in_flight, handler_itr, resource{std::move( resource )}, body{std::move( body )}, con]() { try { handler_itr->second( resource, body, - [ioc{std::move(ioc)}, &bytes_in_flight, con]( int code, fc::variant response_body ) { - boost::asio::post( *ioc, [ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { + [&ioc, &bytes_in_flight, con]( int code, fc::variant response_body ) { + boost::asio::post( ioc, [&ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { std::string json = fc::json::to_string( response_body ); response_body.clear(); const size_t json_size = json.size(); @@ -340,11 +338,11 @@ namespace eosio { void create_server_for_endpoint(const tcp::endpoint& ep, websocketpp::server>& ws) { try { ws.clear_access_channels(websocketpp::log::alevel::all); - ws.init_asio(&(*server_ioc)); + ws.init_asio( &thread_pool->get_executor() ); ws.set_reuse_addr(true); ws.set_max_http_body_size(max_body_size); // capture server_ioc shared_ptr in http handler to keep it alive while in use - ws.set_http_handler([&, 
ioc = this->server_ioc](connection_hdl hdl) { + ws.set_http_handler([&](connection_hdl hdl) { handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ @@ -518,16 +516,7 @@ namespace eosio { void http_plugin::plugin_startup() { - my->thread_pool.emplace( my->thread_pool_size ); - my->server_ioc = std::make_shared(); - my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) ); - for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { - std::string tn = "http-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ioc->run(); - } ); - } + my->thread_pool.emplace( "http", my->thread_pool_size ); if(my->listen_endpoint) { try { @@ -551,10 +540,10 @@ namespace eosio { if(my->unix_endpoint) { try { my->unix_server.clear_access_channels(websocketpp::log::alevel::all); - my->unix_server.init_asio(&(*my->server_ioc)); + my->unix_server.init_asio( &my->thread_pool->get_executor() ); my->unix_server.set_max_http_body_size(my->max_body_size); my->unix_server.listen(*my->unix_endpoint); - my->unix_server.set_http_handler([&, ioc = my->server_ioc](connection_hdl hdl) { + my->unix_server.set_http_handler([&, &ioc = my->thread_pool->get_executor()](connection_hdl hdl) { my->handle_http_request( my->unix_server.get_con_from_hdl(hdl)); }); my->unix_server.start_accept(); @@ -614,12 +603,7 @@ namespace eosio { if(my->unix_server.is_listening()) my->unix_server.stop_listening(); - if( my->server_ioc_work ) - my->server_ioc_work->reset(); - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { - my->thread_pool->join(); my->thread_pool->stop(); } } From 1c27590109abc5ec0d412c3da23d462c88dda88e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 14:20:12 -0400 Subject: [PATCH 78/94] Use named_thread_pool to simplify code --- plugins/net_plugin/net_plugin.cpp | 53 +++++++++++-------------------- 1 file changed, 18 insertions(+), 35 
deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 58a47c8c0c5..9341191beb6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -158,11 +159,8 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 1; // currently used by server_ioc - optional thread_pool; - std::shared_ptr server_ioc; - optional server_ioc_work; - + uint16_t thread_pool_size = 1; + optional thread_pool; void connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); @@ -497,7 +495,7 @@ namespace eosio { peer_block_state_index blk_state; transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us - std::shared_ptr server_ioc; // keep ioc alive + boost::asio::io_context& server_ioc; boost::asio::io_context::strand strand; socket_ptr socket; @@ -731,9 +729,9 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - server_ioc( my_impl->server_ioc ), + server_ioc( my_impl->thread_pool->get_executor() ), strand( app().get_io_service() ), - socket( std::make_shared( std::ref( *my_impl->server_ioc ))), + socket( std::make_shared( my_impl->thread_pool->get_executor() ) ), node_id(), last_handshake_recv(), last_handshake_sent(), @@ -757,7 +755,7 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - server_ioc( my_impl->server_ioc ), + server_ioc( my_impl->thread_pool->get_executor() ), strand( app().get_io_service() ), socket( s ), node_id(), @@ -784,8 +782,8 @@ namespace eosio { void connection::initialize() { auto *rnd = node_id.data(); rnd[0] = 0; - response_expected.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); - read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); + response_expected.reset(new 
boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + read_delay_timer.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); } bool connection::connected() { @@ -1933,9 +1931,9 @@ namespace eosio { void net_plugin_impl::start_listen_loop() { - auto socket = std::make_shared( std::ref( *server_ioc ) ); - acceptor->async_accept( *socket, [socket, this, ioc = server_ioc]( boost::system::error_code ec ) { - app().post( priority::low, [socket, this, ec, ioc{std::move(ioc)}]() { + auto socket = std::make_shared( my_impl->thread_pool->get_executor() ); + acceptor->async_accept( *socket, [socket, this]( boost::system::error_code ec ) { + app().post( priority::low, [socket, this, ec]() { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; @@ -2662,8 +2660,8 @@ namespace eosio { } void net_plugin_impl::start_monitors() { - connector_check.reset(new boost::asio::steady_timer( *server_ioc )); - transaction_check.reset(new boost::asio::steady_timer( *server_ioc )); + connector_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + transaction_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); start_conn_timer(connector_period, std::weak_ptr()); start_txn_timer(); } @@ -3012,19 +3010,10 @@ namespace eosio { void net_plugin::plugin_startup() { my->producer_plug = app().find_plugin(); - my->thread_pool.emplace( my->thread_pool_size ); - my->server_ioc = std::make_shared(); - my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); // currently thread_pool only used for server_ioc - for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { - std::string tn = "net-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ioc->run(); - } ); - } + my->thread_pool.emplace( "net", my->thread_pool_size ); - my->resolver = std::make_shared( std::ref( *my->server_ioc )); + my->resolver = 
std::make_shared( my->thread_pool->get_executor() ); if( my->p2p_address.size() > 0 ) { auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); @@ -3033,7 +3022,7 @@ namespace eosio { my->listen_endpoint = *my->resolver->resolve( query ); - my->acceptor.reset( new tcp::acceptor( *my->server_ioc ) ); + my->acceptor.reset( new tcp::acceptor( my_impl->thread_pool->get_executor() ) ); if( !my->p2p_server_address.empty() ) { my->p2p_address = my->p2p_server_address; @@ -3053,7 +3042,7 @@ namespace eosio { } } - my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) ); my->ticker(); if( my->acceptor ) { @@ -3098,9 +3087,6 @@ namespace eosio { void net_plugin::plugin_shutdown() { try { fc_ilog( logger, "shutdown.." ); - if( my->server_ioc_work ) - my->server_ioc_work->reset(); - if( my->connector_check ) my->connector_check->cancel(); if( my->transaction_check ) @@ -3122,10 +3108,7 @@ namespace eosio { my->connections.clear(); } - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { - my->thread_pool->join(); my->thread_pool->stop(); } fc_ilog( logger, "exit shutdown" ); From d433884a1dcef19e795b36aafba7c65e7d3bf94c Mon Sep 17 00:00:00 2001 From: Kyle Morgan Date: Wed, 3 Apr 2019 13:09:44 -0400 Subject: [PATCH 79/94] Add git submodule regression check * Add submodule_check.sh, which checks to see if the HEAD of any submodules on a pull-requested branch are older than they were previously. * Add the corresponding buildkite pipeline to run submodule_check.sh. 
--- .buildkite/pipeline.yml | 8 ++++++++ scripts/submodule_check.sh | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100755 scripts/submodule_check.sh diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f83249df044..c1500cd77ee 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -614,3 +614,11 @@ steps: - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" timeout: 60 + + - command: | + echo "+++ :microscope: Running git submodule regression check" && \ + ./scripts/submodule_check.sh + label: "Git submodule regression check" + agents: + queue: "automation-large-builder-fleet" + timeout: 240 diff --git a/scripts/submodule_check.sh b/scripts/submodule_check.sh new file mode 100755 index 00000000000..16aace418bf --- /dev/null +++ b/scripts/submodule_check.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +REPO_DIR=`mktemp -d` +git clone "$BUILDKITE_REPO" "$REPO_DIR" +git submodule update --init --recursive +cd "$REPO_DIR" + +declare -A PR_MAP +declare -A BASE_MAP + +echo "getting submodule info for $BUILDKITE_BRANCH" +git checkout "$BUILDKITE_BRANCH" &> /dev/null +git submodule update --init &> /dev/null +while read -r a b; do + PR_MAP[$a]=$b +done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') + +echo "getting submodule info for $BUILDKITE_PULL_REQUEST_BASE_BRANCH" +git checkout "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" &> /dev/null +git submodule update --init &> /dev/null +while read -r a b; do + BASE_MAP[$a]=$b +done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') + +for k in "${!BASE_MAP[@]}"; do + base_ts=${BASE_MAP[$k]} + pr_ts=${PR_MAP[$k]} + echo "submodule $k" + echo " timestamp on $BUILDKITE_BRANCH: $pr_ts" + echo " timestamp on $BUILDKITE_PULL_REQUEST_BASE_BRANCH: $base_ts" + if (( $pr_ts < $base_ts)); then + echo "ERROR: $k is older on $BUILDKITE_BRANCH than $BUILDKITE_PULL_REQUEST_BASE_BRANCH" + exit 1 + fi +done 
From 8aa24ca6617daecede9c2ea7dc7b6282558db6b8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Apr 2019 18:13:54 -0400 Subject: [PATCH 80/94] Add back in Docker Hub deprecation that was accidentally removed --- Docker/README.md | 40 +--------------------------------------- 1 file changed, 1 insertion(+), 39 deletions(-) diff --git a/Docker/README.md b/Docker/README.md index 1aa0513cca9..6eade280f9b 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -133,45 +133,7 @@ docker volume rm keosd-data-volume ### Docker Hub -Docker Hub image available from [docker hub](https://hub.docker.com/r/eosio/eos/). -Create a new `docker-compose.yaml` file with the content below - -```bash -version: "3" - -services: - nodeosd: - image: eosio/eos:latest - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - - keosd: - image: eosio/eos:latest - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=localhost:8900 --http-alias=keosd:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - -volumes: - nodeos-data-volume: - keosd-data-volume: - -``` - -*NOTE:* the default version is the latest, you can change it to what you want - -run `docker pull eosio/eos:latest` - -run `docker-compose up` +Docker Hub images are now deprecated. New build images were discontinued on January 1st, 2019. The existing old images will be removed on June 1st, 2019. 
### EOSIO Testnet From 0cfa5b3bce938b348692023612234c4b771b3961 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 4 Apr 2019 11:23:09 -0400 Subject: [PATCH 81/94] Update to fc master with set_os_thread_name --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 809c8b7434e..ae6ec564f0d 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 809c8b7434e6797efa8dd1bfba546b551e4d830e +Subproject commit ae6ec564f0db6d3378348ef6b475042e332e612a From 2b20aa3902ca28a0f75db284baff6719f14533e4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 4 Apr 2019 11:30:01 -0400 Subject: [PATCH 82/94] Fix warning about unneeded capture --- plugins/http_plugin/http_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index befa9686287..3345fcdb68c 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -304,7 +304,7 @@ namespace eosio { try { handler_itr->second( resource, body, [&ioc, &bytes_in_flight, con]( int code, fc::variant response_body ) { - boost::asio::post( ioc, [&ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { + boost::asio::post( ioc, [response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { std::string json = fc::json::to_string( response_body ); response_body.clear(); const size_t json_size = json.size(); From 3b0261aa86428ef8b612ee2bc76648859757aef5 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 4 Apr 2019 13:31:10 -0400 Subject: [PATCH 83/94] Removed deprecated operating systems from Buildkite pipelines --- .buildkite/long_running_tests.yml | 118 +------------- .buildkite/pipeline.yml | 260 +++--------------------------- 2 files changed, 28 insertions(+), 350 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 
dd0d6cbee9d..c242d219b0e 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,6 +1,5 @@ steps: - - - command: | + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -21,7 +20,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -42,7 +41,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -63,28 +62,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -105,28 +83,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: 
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" @@ -140,20 +97,6 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - wait - command: | # Ubuntu 16.04 Tests @@ -216,26 +159,6 @@ steps: workdir: /data/job timeout: 90 - - command: | # Amazon AWS-1 Linux Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":aws: 1 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 90 - - command: | # Amazon AWS-2 Linux Tests echo "--- :arrow_down: Downloading Build Directory" buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" @@ -256,37 +179,6 @@ steps: workdir: /data/job timeout: 90 - - command: | # Fedora Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":fedora: 27 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 90 - - - command: | # High Sierra Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running LR Tests" - ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh - label: ":darwin: High Sierra LR Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 90 - - command: | # Mojave Tests echo "--- :arrow_down: Downloading Build Directory" buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c1500cd77ee..e98cba4902f 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,6 +1,5 @@ steps: - - - command: | + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -21,7 +20,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -42,7 +41,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -63,28 +62,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - 
label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -105,28 +83,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" @@ -140,20 +97,6 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - wait # Ubuntu 16.04 Tests @@ -279,47 +222,6 @@ steps: workdir: /data/job timeout: 60 - # Amazon AWS-1 Linux Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" 
- buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":aws: 1 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":aws: 1 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - # Amazon AWS-2 Linux Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -361,71 +263,6 @@ steps: workdir: /data/job timeout: 60 - # Fedora Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":fedora: 27 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":fedora: 27 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - # High Sierra Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job - ./scripts/parallel-test.sh - label: ":darwin: High Sierra Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh - label: ":darwin: High Sierra NP Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 60 - # Mojave Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -452,37 +289,7 @@ steps: - wait - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: High Sierra Package Builder" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | + - command: | # Ubuntu 16.04 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" tar -zxf build.tar.gz @@ -508,7 +315,7 @@ steps: PKGTYPE: "deb" timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" tar -zxf build.tar.gz @@ -534,40 +341,7 @@ steps: PKGTYPE: "deb" timeout: 60 - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - yum install -y rpm-build - mkdir -p /root/rpmbuild/BUILD - mkdir -p /root/rpmbuild/BUILDROOT - mkdir -p /root/rpmbuild/RPMS - mkdir -p /root/rpmbuild/SOURCES - mkdir -p /root/rpmbuild/SPECS - mkdir -p /root/rpmbuild/SRPMS - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":fedora: 27 Package builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.rpm" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - env: - OS: "fc27" - PKGTYPE: "rpm" - timeout: 60 - - - command: | + - command: | # CentOS 7 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" tar -zxf build.tar.gz @@ -600,18 +374,30 @@ steps: PKGTYPE: "rpm" timeout: 60 + - command: | # macOS Mojave Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: + - "build/packages/*.tar.gz" + - "build/packages/*.rb" + timeout: 60 + - wait - command: | echo "--- :arrow_down: Downloading brew files" - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" - mv build/packages/eosio.rb build/packages/eosio_highsierra.rb buildkite-agent artifact download "build/packages/eosio.rb" . 
--step ":darwin: Mojave Package Builder" label: ":darwin: Brew Updater" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" timeout: 60 From cddd437300e4269b6e1be88f187fc75402625f39 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 4 Apr 2019 13:42:37 -0400 Subject: [PATCH 84/94] YAML is space-sensitive --- .buildkite/pipeline.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index e98cba4902f..19bbdf114ff 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -375,19 +375,19 @@ steps: timeout: 60 - command: | # macOS Mojave Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: + - "build/packages/*.tar.gz" + - "build/packages/*.rb" + timeout: 60 - wait From 302270b10d215a6c2451600193d9ddb3419f947d Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 4 Apr 2019 15:01:05 -0400 Subject: [PATCH 85/94] fix rpm command for uninstalling eosio --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7bce246fbc3..e4ec6e0b69d 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ $ sudo yum install ./eosio-1.7.0-rc1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh -$ sudo yum remove eosio.cdt +$ sudo yum remove eosio ``` #### Fedora RPM Package Install ```sh From c61b8d35c0126e49a2c45c1b8e23a98c9edf3859 Mon Sep 17 00:00:00 2001 From: Kyle Morgan Date: Thu, 4 Apr 2019 16:35:21 -0400 Subject: [PATCH 86/94] Make the submodule regression check more robust Previously, the git submodule regression check was based on the commit timestamp alone. There are cases where a pull request was branched off an old version of the base branch that would trigger this check, even though the submodule itself was not changed. Now, if the timestamp-based check detects the submodule is out-of-date, the branch's diff is checked as well to see if the submodule was modified. 
--- scripts/submodule_check.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/scripts/submodule_check.sh b/scripts/submodule_check.sh index 16aace418bf..b9ec13204fa 100755 --- a/scripts/submodule_check.sh +++ b/scripts/submodule_check.sh @@ -29,7 +29,13 @@ for k in "${!BASE_MAP[@]}"; do echo " timestamp on $BUILDKITE_BRANCH: $pr_ts" echo " timestamp on $BUILDKITE_PULL_REQUEST_BASE_BRANCH: $base_ts" if (( $pr_ts < $base_ts)); then - echo "ERROR: $k is older on $BUILDKITE_BRANCH than $BUILDKITE_PULL_REQUEST_BASE_BRANCH" - exit 1 + echo "$k is older on $BUILDKITE_BRANCH than $BUILDKITE_PULL_REQUEST_BASE_BRANCH; investigating..." + + if for c in `git log $BUILDKITE_BRANCH ^$BUILDKITE_PULL_REQUEST_BASE_BRANCH --pretty=format:"%H"`; do git show --pretty="" --name-only $c; done | grep -q "^$k$"; then + echo "ERROR: $k has regressed" + exit 1 + else + echo "$k was not in the diff; no regression detected" + fi fi done From 88093a2902f3772c66e1d0ca48c01e80dc13e2d4 Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Sat, 6 Apr 2019 02:59:07 +0900 Subject: [PATCH 87/94] Remove unused skip_flag in base_tester::produce_block --- .../testing/include/eosio/testing/tester.hpp | 26 +++++++++---------- libraries/testing/tester.cpp | 2 +- unittests/snapshot_tests.cpp | 8 +++--- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 4328bda7ee8..265bf770f83 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -89,8 +89,8 @@ namespace eosio { namespace testing { void open( const snapshot_reader_ptr& snapshot ); bool is_same_chain( base_tester& other ); - virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; - virtual signed_block_ptr produce_empty_block( 
fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; + virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; + virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; virtual signed_block_ptr finish_block() = 0; void produce_blocks( uint32_t n = 1, bool empty = false ); void produce_blocks_until_end_of_round(); @@ -281,7 +281,7 @@ namespace eosio { namespace testing { } protected: - signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false, uint32_t skip_flag = 0 ); + signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false ); void _start_block(fc::time_point block_time); signed_block_ptr _finish_block(); @@ -308,13 +308,13 @@ namespace eosio { namespace testing { init(config); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { - return _produce_block(skip_time, false, skip_flag); + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + return _produce_block(skip_time, false); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - return _produce_block(skip_time, true, skip_flag); + return _produce_block(skip_time, true); } signed_block_ptr finish_block()override { @@ -388,16 +388,16 @@ namespace eosio { namespace testing { init(config); } - signed_block_ptr produce_block( fc::microseconds skip_time = 
fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { - auto sb = _produce_block(skip_time, false, skip_flag | 2); + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + auto sb = _produce_block(skip_time, false); auto bs = validating_node->create_block_state_future( sb ); validating_node->push_block( bs ); return sb; } - signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ ) { - return _produce_block(skip_time, false, skip_flag | 2); + signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) { + return _produce_block(skip_time, false); } void validate_push_block(const signed_block_ptr& sb) { @@ -405,9 +405,9 @@ namespace eosio { namespace testing { validating_node->push_block( bs ); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - auto sb = _produce_block(skip_time, true, skip_flag | 2); + auto sb = _produce_block(skip_time, true); auto bs = validating_node->create_block_state_future( sb ); validating_node->push_block( bs ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 63a0788931f..79e0c95d11c 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -154,7 +154,7 @@ namespace eosio { namespace testing { return b; } - signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs, uint32_t skip_flag) { + signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool 
skip_pending_trxs) { auto head = control->head_block_state(); auto head_time = control->head_block_time(); auto next_time = head_time + skip_time; diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index c3578e15750..a3749f9656a 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -49,13 +49,13 @@ class snapshotted_tester : public base_tester { init(copied_config, snapshot); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { - return _produce_block(skip_time, false, skip_flag); + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + return _produce_block(skip_time, false); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - return _produce_block(skip_time, true, skip_flag); + return _produce_block(skip_time, true); } signed_block_ptr finish_block()override { From 572dd5d5c42f36efb120fabf44cbd73275fe7498 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 5 Apr 2019 14:08:37 -0500 Subject: [PATCH 88/94] Fix for bad alloc for catchup test. 
--- tests/nodeos_startup_catchup.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index e75fe165230..03a55936385 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -7,6 +7,7 @@ from WalletMgr import WalletMgr from Node import BlockType from Node import Node +import signal from TestHelper import AppArgs from TestHelper import TestHelper @@ -167,6 +168,8 @@ def head(node): Print("Verify catchup node is advancing to producer") # verify catchup node is advancing to producer catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + catchupNode.kill(signal.SIGTERM) + catchupNode.popenProc=None testSuccessful=True From 0fe727a587b681b0dd637b6ad8f45ad51fb4ce50 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 5 Apr 2019 16:40:34 -0400 Subject: [PATCH 89/94] Removed remaining bnet tests --- tests/CMakeLists.txt | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f2e6958eb15..747c2f85b0f 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -51,12 +51,8 @@ add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_ou add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_sanity_bnet_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME 
nodeos_run_bnet_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_run_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) @@ -67,8 +63,6 @@ endif() add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST distributed-transactions-bnet-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -122,7 +116,7 @@ if(ENABLE_COVERAGE_TESTING) endif() # NOT GENHTML_PATH # no spaces allowed within tests list - set(ctest_tests 'plugin_test|p2p_dawn515_test|nodeos_run_test|bnet_nodeos_run_test|distributed-transactions-test|restart-scenarios-test_resync') + set(ctest_tests 'plugin_test|p2p_dawn515_test|nodeos_run_test|distributed-transactions-test|restart-scenarios-test_resync') set(ctest_exclude_tests 'nodeos_run_remote_test|nodeos_run_test-mongodb|distributed-transactions-remote-test|restart-scenarios-test_replay') # Setup target From 
2ae9463ebe4235870747cf8634a5d6a152a73395 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 5 Apr 2019 19:53:30 -0400 Subject: [PATCH 90/94] Increase the timeout for nodeos_startup_catchup_lr_test from default of 1500 to 3000 seconds --- tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f2e6958eb15..d57f4d9d81b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -103,6 +103,7 @@ add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_ set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_tests_properties(nodeos_startup_catchup_lr_test PROPERTIES TIMEOUT 3000) set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) if(ENABLE_COVERAGE_TESTING) From b70da87923db1612dbce7250568c43a23e617e92 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 8 Apr 2019 08:15:14 -0500 Subject: [PATCH 91/94] Prevent core dump by catching exception --- programs/eosio-launcher/main.cpp | 43 +++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 35f12b94e75..7e6bfbaf7b3 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1673,20 +1673,35 @@ launcher_def::kill (launch_modes mode, string sig_opt) { case LM_LOCAL: case LM_REMOTE : { bfs::path source = "last_run.json"; - fc::json::from_file(source).as(last_run); - for (auto &info : last_run.running_nodes) { - if (mode == LM_ALL || (info.remote && mode == LM_REMOTE) || - (!info.remote && mode == LM_LOCAL)) { - if (info.pid_file.length()) { - string pid; - fc::json::from_file(info.pid_file).as(pid); - string kill_cmd = "kill " + sig_opt + " " + pid; - 
boost::process::system (kill_cmd); - } - else { - boost::process::system (info.kill_cmd); - } - } + try { + fc::json::from_file( source ).as( last_run ); + for( auto& info : last_run.running_nodes ) { + if( mode == LM_ALL || (info.remote && mode == LM_REMOTE) || + (!info.remote && mode == LM_LOCAL) ) { + try { + if( info.pid_file.length() ) { + string pid; + fc::json::from_file( info.pid_file ).as( pid ); + string kill_cmd = "kill " + sig_opt + " " + pid; + boost::process::system( kill_cmd ); + } else { + boost::process::system( info.kill_cmd ); + } + } catch( fc::exception& fce ) { + cerr << "unable to kill fc::exception=" << fce.to_detail_string() << endl; + } catch( std::exception& stde ) { + cerr << "unable to kill std::exception=" << stde.what() << endl; + } catch( ... ) { + cerr << "Unable to kill" << endl; + } + } + } + } catch( fc::exception& fce ) { + cerr << "unable to open " << source << " fc::exception=" << fce.to_detail_string() << endl; + } catch( std::exception& stde ) { + cerr << "unable to open " << source << " std::exception=" << stde.what() << endl; + } catch( ... 
) { + cerr << "Unable to open " << source << endl; } } } From 2b8848c0c6c79569ab9daaff0c5c4e54a753134a Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 10 Apr 2019 11:57:42 +0800 Subject: [PATCH 92/94] Add missing loadSystemContract --- tests/Cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index f9cab40bec1..3ccbabe43aa 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -430,7 +430,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : if not loadSystemContract: useBiosBootFile=False #ensure we use Cluster.bootstrap if onlyBios or not useBiosBootFile: - self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios, onlySetProds) + self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False From b4e67e5be66599c19f48d3e3201a26d309ee5652 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 10 Apr 2019 14:34:06 +0800 Subject: [PATCH 93/94] Remove unneeded import --- tests/Node.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 16f51464353..408929fac9a 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1564,7 +1564,6 @@ def getActivatedProtocolFeatures(self): return latestBlockHeaderState["activated_protocol_features"]["protocol_features"] def modifyBuiltinPFSubjRestrictions(self, nodeId, featureCodename, subjectiveRestriction={}): - from Cluster import Cluster jsonPath = os.path.join(Utils.getNodeConfigDir(nodeId), "protocol_features", "BUILTIN-{}.json".format(featureCodename)) From 69e4b7e3b79e2f63e582308eb295e882fa1aa056 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 10 Apr 2019 17:12:11 +0800 Subject: [PATCH 94/94] Use processCurlCommand instead of sendRpcApi and remove sendRpcApi --- tests/Node.py | 26 ++--------------- 
..._multiple_version_protocol_feature_test.py | 29 ++++++++++--------- 2 files changed, 18 insertions(+), 37 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 408929fac9a..334d9d2e7d5 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -7,10 +7,6 @@ import datetime import json import signal -import urllib.request -import urllib.parse -from urllib.error import HTTPError -import tempfile from core_symbol import CORE_SYMBOL from testUtils import Utils @@ -1455,26 +1451,10 @@ def reportStatus(self): Utils.Print(" hbn : %s (%s)" % (self.lastRetrievedHeadBlockNum, status)) Utils.Print(" lib : %s (%s)" % (self.lastRetrievedLIB, status)) - def sendRpcApi(self, relativeUrl, data={}): - url = urllib.parse.urljoin(self.endpointHttp, relativeUrl) - req = urllib.request.Request(url) - req.add_header('Content-Type', 'application/json; charset=utf-8') - reqData = json.dumps(data).encode("utf-8") - rpcApiResult = None - try: - response = urllib.request.urlopen(req, reqData) - rpcApiResult = json.loads(response.read().decode("utf-8")) - except HTTPError as e: - Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e.read())) - raise e - except Exception as e: - Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e)) - raise e - return rpcApiResult - # Require producer_api_plugin def scheduleProtocolFeatureActivations(self, featureDigests=[]): - self.sendRpcApi("v1/producer/schedule_protocol_feature_activations", {"protocol_features_to_activate": featureDigests}) + param = { "protocol_features_to_activate": featureDigests } + self.processCurlCmd("producer", "schedule_protocol_feature_activations", json.dumps(param)) # Require producer_api_plugin def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatable=False): @@ -1482,7 +1462,7 @@ def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatab "exclude_disabled": excludeDisabled, "exclude_unactivatable": excludeUnactivatable } - res 
= self.sendRpcApi("v1/producer/get_supported_protocol_features", param) + res = self.processCurlCmd("producer", "get_supported_protocol_features", json.dumps(param)) return res # This will return supported protocol features in a dict (feature codename as the key), i.e. diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index f2bd4ee8516..be3324f969e 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -82,6 +82,7 @@ def hasBlockBecomeIrr(): associatedNodeLabels = { "3": "170" } + Utils.Print("Alternate Version Labels File is {}".format(alternateVersionLabelsFile)) assert exists(alternateVersionLabelsFile), "Alternate version labels file does not exist" assert cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4, extraNodeosArgs=" --plugin eosio::producer_api_plugin ", @@ -91,31 +92,31 @@ def hasBlockBecomeIrr(): alternateVersionLabelsFile=alternateVersionLabelsFile, associatedNodeLabels=associatedNodeLabels), "Unable to launch cluster" - def pauseBlockProduction(nodes:[Node]): - for node in nodes: - node.sendRpcApi("v1/producer/pause") + newNodeIds = [0, 1, 2] + oldNodeId = 3 + newNodes = list(map(lambda id: cluster.getNode(id), newNodeIds)) + oldNode = cluster.getNode(oldNodeId) + allNodes = [*newNodes, oldNode] - def resumeBlockProduction(nodes:[Node]): - for node in nodes: - node.sendRpcApi("v1/producer/resume") + def pauseBlockProductions(): + for node in allNodes: + if not node.killed: node.processCurlCmd("producer", "pause", "") + + def resumeBlockProductions(): + for node in allNodes: + if not node.killed: node.processCurlCmd("producer", "resume", "") def shouldNodesBeInSync(nodes:[Node]): # Pause all block production to ensure the head is not moving - pauseBlockProduction(nodes) + pauseBlockProductions() time.sleep(1) # Wait for some time to ensure all blocks are propagated headBlockIds = [] 
for node in nodes: headBlockId = node.getInfo()["head_block_id"] headBlockIds.append(headBlockId) - resumeBlockProduction(nodes) + resumeBlockProductions() return len(set(headBlockIds)) == 1 - newNodeIds = [0, 1, 2] - oldNodeId = 3 - newNodes = list(map(lambda id: cluster.getNode(id), newNodeIds)) - oldNode = cluster.getNode(oldNodeId) - allNodes = [*newNodes, oldNode] - # Before everything starts, all nodes (new version and old version) should be in sync assert shouldNodesBeInSync(allNodes), "Nodes are not in sync before preactivation"