From 34ce13df2fb81f49efabd565829d1245c8bef452 Mon Sep 17 00:00:00 2001 From: Greg Lee Date: Wed, 13 Mar 2019 07:34:15 -0400 Subject: [PATCH 01/49] Added EOS Rio's Hyperion History API described in this blog post: https://medium.com/@eosriobrazil/presenting-hyperion-history-api-solution-f8a8fda5865b --- plugins/COMMUNITY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index 55cce74a33b..6ec4f66a6e0 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -17,6 +17,7 @@ Third parties are encouraged to make pull requests to this file (`develop` branc | Chintai ZMQ Watcher | https://github.com/acoutts/chintai-zeromq-watcher-plugin | | Mongo History API | https://github.com/CryptoLions/EOS-mongo-history-API | | State History API | https://github.com/acoutts/EOS-state-history-API | +| Hyperion History API | https://github.com/eosrio/Hyperion-History-API | ## DISCLAIMER: From a71d76174825fefdcd71a2946eecb24ebeb1ec2d Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 13 Mar 2019 08:50:36 -0400 Subject: [PATCH 02/49] pipeline changes (#6920) --- .buildkite/coverage.yml | 22 +++++++++------------- CMakeLists.txt | 2 +- tests/CMakeLists.txt | 2 +- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/.buildkite/coverage.yml b/.buildkite/coverage.yml index 190c9c7f5f9..ded8b3651e5 100644 --- a/.buildkite/coverage.yml +++ b/.buildkite/coverage.yml @@ -1,17 +1,16 @@ steps: - - command: | + - label: ":spiral_note_pad: Generate Report" + command: | + echo "--- :hammer: Ensuring lcov is installed" && apt-get install -y lcov && \ echo "--- :hammer: Building" && \ - /usr/bin/cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT="${BOOST_ROOT}" -DWASM_ROOT="${WASM_ROOT}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true -DENABLE_COVERAGE_TESTING=true -DBUILD_DOXYGEN=false && \ - /usr/bin/ninja + cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT="${BOOST_ROOT}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true -DENABLE_COVERAGE_TESTING=true -DBUILD_DOXYGEN=false && \ + ninja && \ echo "--- :spiral_note_pad: Generating Code Coverage Report" && \ - /usr/bin/ninja EOSIO_ut_coverage && \ + ninja EOSIO_ut_coverage && \ echo "--- :arrow_up: Publishing Code Coverage Report" && \ buildkite-agent artifact upload "EOSIO_ut_coverage/**/*" s3://eos-coverage/$BUILDKITE_JOB_ID && \ - cp /config/.coveralls.yml . 
&& \ - /usr/local/bin/coveralls-lcov EOSIO_ut_coverage_filtered.info && \ echo "+++ View Report" && \ printf "\033]1339;url=https://eos-coverage.s3-us-west-2.amazonaws.com/$BUILDKITE_JOB_ID/EOSIO_ut_coverage/index.html;content=View Full Coverage Report\a\n" - label: ":spiral_note_pad: Generate Report" agents: queue: "automation-large-builder-fleet" plugins: @@ -20,16 +19,13 @@ steps: account_ids: "436617320021" no-include-email: true region: "us-west-2" - docker#v2.1.0: + docker#v3.0.1: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config environment: + - LCOV_PATH=/usr/bin/lcov - BOOST_ROOT=/root/opt/boost - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true + - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/mongodb/bin:~/opt/llvm/bin/ timeout: 60 diff --git a/CMakeLists.txt b/CMakeLists.txt index 997ae2c1e65..0d54f35526c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -192,7 +192,7 @@ set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build EOSIO for code coverage anal if(ENABLE_COVERAGE_TESTING) SET(CMAKE_CXX_FLAGS "--coverage ${CMAKE_CXX_FLAGS}") - find_program( LCOV_PATH lcov ) + find_program( LCOV_PATH lcov ) find_program( LLVMCOV_PATH llvm-cov ) find_program( GENHTML_PATH NAMES genhtml) endif() diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 68116bab863..ae9b36bcd68 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -117,7 +117,7 @@ if(ENABLE_COVERAGE_TESTING) if(NOT LLVMCOV_PATH) message(FATAL_ERROR "llvm-cov not found! Aborting...") - endif() # NOT LCOV_PATH + endif() if(NOT GENHTML_PATH) message(FATAL_ERROR "genhtml not found! 
Aborting...") From dc8e70002663c46b3631c8b575c9ce82c39f55cb Mon Sep 17 00:00:00 2001 From: eun2ce Date: Thu, 14 Mar 2019 14:57:10 +0900 Subject: [PATCH 03/49] clear unused variable --- programs/cleos/main.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index bfceb4b40cc..caa24ae5ccf 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -305,7 +305,7 @@ void sign_transaction(signed_transaction& trx, fc::variant& required_keys, const trx = signed_trx.as<signed_transaction>(); } -fc::variant push_transaction( signed_transaction& trx, int32_t extra_kcpu = 1000, packed_transaction::compression_type compression = packed_transaction::none ) { +fc::variant push_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::none ) { auto info = get_info(); if (trx.signatures.size() == 0) { // #5445 can't change txn content if already signed @@ -347,11 +347,11 @@ fc::variant push_transaction( signed_transaction& trx, int32_t extra_kcpu = 1000 } } -fc::variant push_actions(std::vector<chain::action>&& actions, int32_t extra_kcpu, packed_transaction::compression_type compression = packed_transaction::none ) { +fc::variant push_actions(std::vector<chain::action>&& actions, packed_transaction::compression_type compression = packed_transaction::none ) { signed_transaction trx; trx.actions = std::forward<decltype(actions)>(actions); - return push_transaction(trx, extra_kcpu, compression); + return push_transaction(trx, compression); } void print_action( const fc::variant& at ) { @@ -497,8 +497,8 @@ void print_result( const fc::variant& result ) { try { } FC_CAPTURE_AND_RETHROW( (result) ) } using std::cout; -void send_actions(std::vector<chain::action>&& actions, int32_t extra_kcpu = 1000, packed_transaction::compression_type compression = packed_transaction::none ) { - auto result = push_actions( move(actions), extra_kcpu, compression); +void send_actions(std::vector<chain::action>&& actions, packed_transaction::compression_type compression = packed_transaction::none ) { + auto result = push_actions( move(actions), compression); if( tx_print_json ) { cout << fc::json::to_pretty_string( result ) << endl; @@ -507,8 +507,8 @@ void send_actions(std::vector<chain::action>&& actions, int32_t extra_kcpu = 100 } } -void send_transaction( signed_transaction& trx, int32_t extra_kcpu, packed_transaction::compression_type compression = packed_transaction::none ) { - auto result = push_transaction(trx, extra_kcpu, compression); +void send_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::none ) { - auto result = push_transaction(trx, compression); if( tx_print_json ) { cout << fc::json::to_pretty_string( result ) << endl; @@ -2971,7 +2971,7 @@ int main( int argc, char** argv ) { actions.emplace_back( create_setcode(account, code_bytes ) ); if ( shouldSend ) { std::cerr << localized("Setting Code...") << std::endl; - send_actions(std::move(actions), 10000, packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::zlib); } } else { std::cerr << localized("Skipping set code because the new code is the same as the existing code") << std::endl; } @@ -3019,7 +3019,7 @@ int main( int argc, char** argv ) { } EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON") if ( shouldSend ) { std::cerr << localized("Setting ABI...") << std::endl; - send_actions(std::move(actions), 10000, packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::zlib); } } else { std::cerr <<
localized("Skipping set abi because the new abi is the same as the existing abi") << std::endl; @@ -3036,7 +3036,7 @@ int main( int argc, char** argv ) { set_abi_callback(); if (actions.size()) { std::cerr << localized("Publishing contract...") << std::endl; - send_actions(std::move(actions), 10000, packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::zlib); } else { std::cout << "no transaction is sent" << std::endl; } From 9f1ac1b26070b5bbc7c661613a2ffd8f6fca19d7 Mon Sep 17 00:00:00 2001 From: Greg Lee Date: Thu, 14 Mar 2019 05:55:53 -0400 Subject: [PATCH 04/49] Add Chronicle to COMMUNITY.md --- plugins/COMMUNITY.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index 6ec4f66a6e0..bff0cbddbb3 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -1,8 +1,8 @@ # Community Plugin List -This file contains a list of community authored plugins for `nodeos`, acting as a directory of the plugins that are available. +This file contains a list of community authored plugins for `nodeos` and APIs/tools that are associated with plugins, acting as a directory of the community authored plugins that are available. -Third parties are encouraged to make pull requests to this file (`develop` branch please) in order to list new plugins. +Third parties are encouraged to make pull requests to this file (`develop` branch please) in order to list new related projects. | Description | URL | | ----------- | --- | @@ -18,7 +18,8 @@ Third parties are encouraged to make pull requests to this file (`develop` branc | Mongo History API | https://github.com/CryptoLions/EOS-mongo-history-API | | State History API | https://github.com/acoutts/EOS-state-history-API | | Hyperion History API | https://github.com/eosrio/Hyperion-History-API | +| Chronicle | https://github.com/EOSChronicleProject/eos-chronicle | ## DISCLAIMER: -The fact that a plugin is listed in this file does not mean the plugin has been reviewed by this repository's maintainers. No warranties are made, i.e. you are at your own risk if you choose to use them. +The fact that a plugin or API/tool is listed in this file does not mean it has been reviewed by this repository's maintainers. No warranties are made, i.e. you are at your own risk if you choose to use them. From 21f11fa50c9b7865e3511167cd248a157259596c Mon Sep 17 00:00:00 2001 From: Greg Lee Date: Thu, 14 Mar 2019 06:03:04 -0400 Subject: [PATCH 05/49] Update disclaimer in COMMUNITY.md --- plugins/COMMUNITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index bff0cbddbb3..f47cefa8e63 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -22,4 +22,4 @@ Third parties are encouraged to make pull requests to this file (`develop` branc ## DISCLAIMER: -The fact that a plugin or API/tool is listed in this file does not mean it has been reviewed by this repository's maintainers. No warranties are made, i.e. you are at your own risk if you choose to use them. +The resources listed here are developed, offered and maintained by third-parties and not by block.one. Providing information, material or commentaries about such third-party resources does not mean we endorse or recommend any of these resources. We are not responsible, and disclaim any responsibility or liability, for your use of or reliance on any of these resources. 
Third-party resources may be updated, changed or terminated at any time, so the information here may be out of date or inaccurate. USAGE AND RELIANCE IS ENTIRELY AT YOUR OWN RISK. From bb9ce200ed1c6e71132c12cdeb6310807fcabf3a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 14 Mar 2019 08:12:31 -0500 Subject: [PATCH 06/49] Added launcher flags to support supplying alternate installation paths. GH #6879. --- programs/eosio-launcher/main.cpp | 56 ++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 66b40819b9d..8a3a75a721b 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -408,6 +408,7 @@ struct launcher_def { bool skip_transaction_signatures = false; string eosd_extra_args; std::map<uint,string> specific_nodeos_args; + std::map<uint,string> specific_nodeos_installation_paths; testnet_def network; string gelf_endpoint; vector<string> aliases; @@ -488,8 +489,10 @@ launcher_def::set_options (bpo::options_description &cfg) { ("genesis,g",bpo::value<string>()->default_value("./genesis.json"),"set the path to genesis.json") ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.") ("nodeos", bpo::value<string>(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes") ("specific-num", bpo::value<vector<uint>>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag each time it is used") ("specific-nodeos", bpo::value<vector<string>>()->composing(), "forward nodeos command line argument(s) to its paired specific instance of nodeos(using \"--specific-num\"), enclose arg(s) in quotes") ("spcfc-inst-num", bpo::value<vector<uint>>()->composing(), "Specify a specific version installation path (using \"--spcfc-inst-nodeos\" flag) for launching this specific instance of nodeos.
This parameter can be entered multiple times and requires a paired \"--spcfc-inst-nodeos\" flag each time it is used") ("spcfc-inst-nodeos", bpo::value<vector<string>>()->composing(), "Provide a specific version installation path to its paired specific instance of nodeos(using \"--spcfc-inst-num\")") ("delay,d",bpo::value<int>(&start_delay)->default_value(0),"seconds delay before starting each node after the first") ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.") ("nogen",bpo::bool_switch(&nogen)->default_value(false),"launch nodes without writing new config files") @@ -513,6 +516,28 @@ inline enum_type& operator|=(enum_type&lhs, const enum_type& rhs) { return lhs = static_cast<enum_type>(static_cast<int>(lhs) | static_cast<int>(rhs)); } +template<typename T> +void retrieve_paired_array_parameters (const variables_map &vmap, const std::string& num_selector, const std::string& paired_selector, std::map<uint,T>& selector_map) { + if (vmap.count(num_selector)) { + const auto specific_nums = vmap[num_selector].as<vector<uint>>(); + const auto specific_args = vmap[paired_selector].as<vector<T>>(); + if (specific_nums.size() != specific_args.size()) { + cerr << "ERROR: every " << num_selector << " argument must be paired with a " << paired_selector << " argument" << endl; + exit (-1); + } + const auto total_nodes = vmap["nodes"].as<size_t>(); + for(uint i = 0; i < specific_nums.size(); ++i) + { + const auto& num = specific_nums[i]; + if (num >= total_nodes) { + cerr << "\"--" << num_selector << "\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; + exit (-1); + } + selector_map[num] = specific_args[i]; + } + } +} + void launcher_def::initialize (const variables_map &vmap) { if (vmap.count("mode")) { @@ -550,24 +575,8 @@ launcher_def::initialize (const variables_map &vmap) { server_ident_file = vmap["servers"].as<string>(); } - if (vmap.count("specific-num")) { - const auto specific_nums = vmap["specific-num"].as<vector<uint>>(); - const auto specific_args = vmap["specific-nodeos"].as<vector<string>>(); - if (specific_nums.size() != specific_args.size()) { - cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl; - exit (-1); - } - const auto total_nodes = vmap["nodes"].as<size_t>(); - for(uint i = 0; i < specific_nums.size(); ++i) - { - const auto& num = specific_nums[i]; - if (num >= total_nodes) { - cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; - exit (-1); - } - specific_nodeos_args[num] = specific_args[i]; - } - } + retrieve_paired_array_parameters(vmap, "specific-num", "specific-nodeos", specific_nodeos_args); + retrieve_paired_array_parameters(vmap, "spcfc-inst-num", "spcfc-inst-nodeos", specific_nodeos_installation_paths); using namespace std::chrono; system_clock::time_point now = system_clock::now(); @@ -1511,7 +1520,14 @@ launcher_def::launch (eosd_def &instance, string &gts) { node_rt_info info; info.remote = !host->is_local(); - string eosdcmd = "programs/nodeos/nodeos "; + string install_path; + if (instance.name != "bios" && !specific_nodeos_installation_paths.empty()) { + const auto node_num = boost::lexical_cast<uint>(instance.get_node_num()); + if (specific_nodeos_installation_paths.count(node_num)) { + install_path = specific_nodeos_installation_paths[node_num] + "/"; + } + } + string eosdcmd = install_path + "programs/nodeos/nodeos "; if (skip_transaction_signatures) { eosdcmd += "--skip-transaction-signatures "; }
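For readers skimming the diff, a quick sketch of how the new paired flags are meant to be combined. Everything here is illustrative (the node count, node number, paths, and launcher binary location are invented for the example): each `--spcfc-inst-num` must be paired with a `--spcfc-inst-nodeos`, and each node number must be lower than the `--nodes` count, exactly as `retrieve_paired_array_parameters` enforces above.

```python
import subprocess

# Hypothetical invocation: a 4-node testnet in which node 2 is launched from
# an alternate installation prefix. The launcher prepends the supplied path,
# so node 2 runs /opt/eosio-prev/programs/nodeos/nodeos while the other nodes
# (and the bios node, which is always excluded) run the default build.
subprocess.check_call([
    "programs/eosio-launcher/eosio-launcher",
    "--nodes", "4",
    "--spcfc-inst-num", "2",                   # node number to override...
    "--spcfc-inst-nodeos", "/opt/eosio-prev",  # ...paired installation path
])
```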
From 4240c316d7d7cd10f589d80f690230fe888dd785 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 14 Mar 2019 08:14:57 -0500 Subject: [PATCH 07/49] Added helper flag to supply alternate label mapping file. GH #6879. --- tests/TestHelper.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 10b69fa334d..a9920a731c1 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -105,6 +105,8 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): parser.add_argument("--clean-run", help="Kill all nodeos and keosd instances", action='store_true') if "--sanity-test" in includeArgs: parser.add_argument("--sanity-test", help="Validates nodeos and keosd are in path and can be started up.", action='store_true') + if "--alternate-versions-labels-file" in includeArgs: + parser.add_argument("--alternate-versions-labels-file", type=str, help="Provide a file to define the labels that can be used in the test and the path to the version installation associated with that.") for arg in applicationSpecificArgs.args: parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default)
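To make the expected file format concrete, here is a sketch of a labels file that could be handed to the new `--alternate-versions-labels-file` flag. The format is inferred from the parser added to Cluster.py in the next patch: one `<label> = <path>` entry per line, labels restricted to word characters, with the label `current` reserved for the default `./` build. The labels and paths below are invented for the example.

```python
# Hypothetical helper that writes an alternate-version-labels file; the two
# installation paths below are illustrative, not real locations.
with open("alternate-version-labels.txt", "w") as f:
    f.write("v170 = /opt/eosio-1.7.0\n")
    f.write("prev = /home/ci/builds/eosio-prev\n")
```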
From 6316febc4a8632b17f9466394c26264332373541 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 14 Mar 2019 08:15:53 -0500 Subject: [PATCH 08/49] Added support for supplying alternate label mappings to Cluster.launcher to take advantage of launcher flags. GH #6879. --- tests/Cluster.py | 60 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 3bc0f215566..d41d8e8731d 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -83,6 +83,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.useBiosBootFile=False self.filesToCleanup=[] + self.alternateVersionLabels=Cluster.__defaultAlternateVersionLabels() def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): @@ -93,13 +94,43 @@ def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): self.walletMgr=walletMgr + @staticmethod + def __defaultAlternateVersionLabels(): + """Return a labels dictionary with just the "current" label to path set.""" + labels={} + labels["current"]="./" + return labels + + def setAlternateVersionLabels(self, file): + """From the provided file return a dictionary of labels to paths.""" + Utils.Print("alternate file=%s" % (file)) + self.alternateVersionLabels=Cluster.__defaultAlternateVersionLabels() + if file is None: + # only have "current" + return + if not os.path.exists(file): + Utils.errorExit("Alternate Version Labels file \"%s\" does not exist" % (file)) + with open(file, 'r') as f: + content=f.read() + p=re.compile(r'^\s*(\w+)\s*=\s*([^\s](?:.*[^\s])?)\s*$', re.MULTILINE) + all=p.findall(content) + for match in all: + label=match[0] + path=match[1] + if label=="current": + Utils.Print("ERROR: cannot overwrite default label %s with path=%s" % (label, path)) + continue + self.alternateVersionLabels[label]=path + if Utils.Debug: Utils.Print("Version label \"%s\" maps to \"%s\"" % (label, path)) + # launch local nodes and set self.nodes # pylint: disable=too-many-locals # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, + associatedNodeLabels=None): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } + alternateVersionLabelsFile: Supply an alternate version labels file to use with associatedNodeLabels. + associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. """ assert(isinstance(topo, str)) + if alternateVersionLabelsFile is not None: + assert(isinstance(alternateVersionLabelsFile, str)) + elif associatedNodeLabels is not None: + associatedNodeLabels=None # need to supply alternateVersionLabelsFile to use labels + + if associatedNodeLabels is not None: + assert(isinstance(associatedNodeLabels, dict)) + Utils.Print("associatedNodeLabels size=%s" % (len(associatedNodeLabels))) + Utils.Print("alternateVersionLabelsFile=%s" % (alternateVersionLabelsFile)) if not self.localCluster: Utils.Print("WARNING: Cluster not local, not launching %s." % (Utils.EosServerName)) @@ -136,6 +178,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne assert(isinstance(totalProducers, (str,int))) producerFlag="--producers %s" % (totalProducers) + self.setAlternateVersionLabels(alternateVersionLabelsFile) + tries = 30 while not Utils.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))): Utils.Print("ERROR: Another process is listening on nodeos default port. wait...") @@ -181,6 +225,18 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--max-transaction-cpu-usage") cmdArr.append(str(150000000)) + if associatedNodeLabels is not None: + for nodeNum,label in associatedNodeLabels.items(): + assert(isinstance(nodeNum, (str,int))) + assert(isinstance(label, str)) + path=self.alternateVersionLabels.get(label) + if path is None: + Utils.errorExit("associatedNodeLabels passed in indicates label %s for node num %s, but it was not identified in %s" % (label, nodeNum, alternateVersionLabelsFile)) + cmdArr.append("--spcfc-inst-num") + cmdArr.append(str(nodeNum)) + cmdArr.append("--spcfc-inst-nodeos") + cmdArr.append(path) + # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" if topo=="bridge": @@ -273,7 +329,7 @@ def getNodeNum(nodeName): producerGroup2.append(nodeName) Utils.Print("Group2 grouping producerIndex=%s, secondGroupStart=%s" % (producerIndex,secondGroupStart)) if group!=prodGroup: - errorExit("Node configuration not consistent with \"bridge\" topology. Node %s has producers that fall into both halves of the bridged network" % (nodeName)) + Utils.errorExit("Node configuration not consistent with \"bridge\" topology. Node %s has producers that fall into both halves of the bridged network" % (nodeName)) for _,bridgeNode in bridgeNodes.items(): bridgeNode["peers"]=[]
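Putting the two patches together, a test script might wire the flag through to `Cluster.launch` roughly as follows. This is a sketch under the assumption that the labels file shown earlier defines a `v170` label; the node counts and the choice of node 1 are illustrative.

```python
from TestHelper import TestHelper
from Cluster import Cluster

# Hypothetical usage of the new parameters: node 1 runs whatever installation
# the "v170" label maps to; all other nodes run the default "current" build.
args = TestHelper.parse_args({"--alternate-versions-labels-file"})
cluster = Cluster(walletd=True)
cluster.launch(pnodes=2, totalNodes=3,
               alternateVersionLabelsFile=args.alternate_versions_labels_file,
               associatedNodeLabels={"1": "v170"})
```

Internally, `launch` resolves each label through `self.alternateVersionLabels` and forwards the result as the paired `--spcfc-inst-num`/`--spcfc-inst-nodeos` launcher arguments shown in the previous patches.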
From 6c54979d75be46c9ef1457f1c350d8cde7ca628f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 18 Mar 2019 12:14:05 -0500 Subject: [PATCH 09/49] Update appbase to shutdown fix --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index f97eaef38f0..be9285b9600 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit f97eaef38f09d3e0a261540c6e0f5868b0bf61e9 +Subproject commit be9285b9600a109baa8704c310f2c3abaf595d2c From 9564d86f647090352494cb8b43f726a289f57a49 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 18 Mar 2019 13:01:42 -0500 Subject: [PATCH 10/49] Update to appbase master with shutdown fix --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index be9285b9600..013246f52f1 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit be9285b9600a109baa8704c310f2c3abaf595d2c +Subproject commit 013246f52f13a7bc129193c3a64e6cd0cea44ac0 From 59447e2dda0a4bbc511376531f9599441d304982 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 19 Mar 2019 11:24:29 -0400 Subject: [PATCH 11/49] Rename eosio-wast2wasm back to original name; don't install eosio-wast2wasm was really the Assemble command from WAVM and we used it for the old wasm build environment. It's no longer needed. Remove the rename and install changes effectively reverting ae9388d restoring this back to upstream --- libraries/wasm-jit/Source/Programs/Assemble.cpp | 2 +- libraries/wasm-jit/Source/Programs/CMakeLists.txt | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/libraries/wasm-jit/Source/Programs/Assemble.cpp b/libraries/wasm-jit/Source/Programs/Assemble.cpp index a3328794ddd..60ca42cf0f9 100644 --- a/libraries/wasm-jit/Source/Programs/Assemble.cpp +++ b/libraries/wasm-jit/Source/Programs/Assemble.cpp @@ -7,7 +7,7 @@ int commandMain(int argc,char** argv) { if(argc < 3) { - std::cerr << "Usage: eosio-wast2wasm in.wast out.wasm [switches]" << std::endl; + std::cerr << "Usage: Assemble in.wast out.wasm [switches]" << std::endl; std::cerr << " -n|--omit-names\t\tOmits WAST function and local names from the output" << std::endl; return EXIT_FAILURE; } diff --git a/libraries/wasm-jit/Source/Programs/CMakeLists.txt b/libraries/wasm-jit/Source/Programs/CMakeLists.txt index 27a3aa427b4..260f4c1092c 100644 --- a/libraries/wasm-jit/Source/Programs/CMakeLists.txt +++ b/libraries/wasm-jit/Source/Programs/CMakeLists.txt @@ -1,7 +1,6 @@ -add_executable(eosio-wast2wasm Assemble.cpp CLI.h) -target_link_libraries(eosio-wast2wasm Logging IR WAST WASM) -set_target_properties(eosio-wast2wasm PROPERTIES FOLDER Programs) -INSTALL(TARGETS eosio-wast2wasm DESTINATION ${CMAKE_INSTALL_BINDIR}) +add_executable(Assemble Assemble.cpp CLI.h) +target_link_libraries(Assemble Logging IR WAST WASM) +set_target_properties(Assemble PROPERTIES FOLDER Programs) add_executable(Disassemble Disassemble.cpp CLI.h) target_link_libraries(Disassemble Logging IR WAST WASM) From 96f6ee165cbacfd9b598e1af248bd9eda2ec3733 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 19 Mar 2019 11:56:50 -0400 Subject: [PATCH 12/49] Don't build WAVM tools any longer Some of these don't work as intended due to changes in WAVM to support EOSIO --- libraries/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index a40355971a9..39d0398305d 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -2,7 +2,7 @@ add_subdirectory( fc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) add_subdirectory( chainbase ) -add_subdirectory( wasm-jit ) +add_subdirectory( wasm-jit EXCLUDE_FROM_ALL ) add_subdirectory( appbase ) add_subdirectory( chain ) add_subdirectory( testing )
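The next patch replaces the pipelines' inline test commands with shared scripts (`scripts/parallel-test.sh`, `scripts/serial-test.sh`, `scripts/long-running-test.sh`) that add xUnit-style report output, parallelism scaled to the agent, and detection of empty test runs. The script bodies are not included in this series, so the following is only a sketch of equivalent logic; the exact ctest flags and the empty-run check are assumptions, not the scripts' actual contents.

```python
#!/usr/bin/env python3
# Rough, assumed equivalent of a parallel CI test runner; not the real script.
import multiprocessing
import subprocess
import sys

jobs = multiprocessing.cpu_count()  # auto-scale parallelism to the build agent
# "-T Test" makes ctest write an XML report under build/Testing/ for CI to
# archive; "-LE _tests" mirrors the inline "ctest -j8 -LE _tests" commands
# these pipelines used before.
run = subprocess.run(["ctest", "-j", str(jobs), "-LE", "_tests",
                      "--output-on-failure", "-T", "Test"],
                     cwd="build", capture_output=True, text=True)
print(run.stdout, end="")
print(run.stderr, end="", file=sys.stderr)
# Empty-test detection: a run that silently executed zero tests should fail.
if "No tests were found" in run.stdout:
    sys.exit(1)
sys.exit(run.returncode)
```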
From 756f3c70585a33717151902b6b3a2b0304b6558e Mon Sep 17 00:00:00 2001 From: Zach <34947245+kj4ezj@users.noreply.github.com> Date: Wed, 20 Mar 2019 11:17:47 -0400 Subject: [PATCH 13/49] Created test scripts with xUnit, auto-scaling parallelism, and empty test detection; and added them to pipeline (#6963) * Created test scripts with xUnit, auto-scaling parallelism, and empty test detection, and added them to pipeline * Copy-pasta error * Added xUnit output and empty test detection to long-running tests * Removed escape character from copy-pasta * Suppress missing DartConfiguration.tcl file error * Increased the long-running test timeout from 60 min to 90 min * Removed line continuations from Buildkite yaml files * Deleted Buildkite yaml files from pipelines migrated to Buildkite repo --- .buildkite/coverage.yml | 31 --- .buildkite/debug.yml | 343 -------------------------- .buildkite/docker.yml | 101 -------- .buildkite/long_running_tests.yml | 210 ++++++---------- .buildkite/pipeline.yml | 391 +++++++++++------------------- .buildkite/sanitizers.yml | 155 ------------ CMakeLists.txt | 2 +- scripts/long-running-test.sh | 26 ++ scripts/parallel-test.sh | 28 +++ scripts/serial-test.sh | 26 ++ 10 files changed, 301 insertions(+), 1012 deletions(-) delete mode 100644 .buildkite/coverage.yml delete mode 100644 .buildkite/debug.yml delete mode 100644 .buildkite/docker.yml delete mode 100644 .buildkite/sanitizers.yml create mode 100755 scripts/long-running-test.sh create mode 100755 scripts/parallel-test.sh create mode 100755 scripts/serial-test.sh diff --git a/.buildkite/coverage.yml b/.buildkite/coverage.yml deleted file mode 100644 index ded8b3651e5..00000000000 --- a/.buildkite/coverage.yml +++ /dev/null @@ -1,31 +0,0 @@ -steps: - - label: ":spiral_note_pad: Generate Report" - command: | - echo "--- :hammer: Ensuring lcov is installed" && apt-get install -y lcov && \ - echo "--- :hammer: Building" && \ - cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT="${BOOST_ROOT}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true -DENABLE_COVERAGE_TESTING=true -DBUILD_DOXYGEN=false && \ - ninja && \ - echo "--- :spiral_note_pad: Generating Code Coverage Report" && \ - ninja EOSIO_ut_coverage && \ - echo "--- :arrow_up: Publishing Code Coverage Report" && \ - buildkite-agent artifact upload "EOSIO_ut_coverage/**/*" s3://eos-coverage/$BUILDKITE_JOB_ID && \ - echo "+++ View Report" && \ - printf "\033]1339;url=https://eos-coverage.s3-us-west-2.amazonaws.com/$BUILDKITE_JOB_ID/EOSIO_ut_coverage/index.html;content=View Full Coverage Report\a\n" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v3.0.1: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - environment: -
LCOV_PATH=/usr/bin/lcov - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/mongodb/bin:~/opt/llvm/bin/ - timeout: 60 diff --git a/.buildkite/debug.yml b/.buildkite/debug.yml deleted file mode 100644 index 3cd6b16d23a..00000000000 --- a/.buildkite/debug.yml +++ /dev/null @@ -1,343 +0,0 @@ -steps: - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Mojave Build" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 16.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":centos: 7 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: 
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: 2 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":darwin: Mojave Tests" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":ubuntu: 16.04 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":ubuntu: 18.04 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":fedora: 27 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":centos: 7 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":aws: 1 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":aws: 2 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 \ No newline at end of file diff --git a/.buildkite/docker.yml b/.buildkite/docker.yml deleted file mode 100644 index 9be30a77cef..00000000000 --- a/.buildkite/docker.yml +++ /dev/null @@ -1,101 +0,0 @@ -steps: - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING BUILD IMAGE" && \ - cd Docker/builder && \ - docker build -t eosio/builder:latest -t eosio/builder:$BUILDKITE_COMMIT -t eosio/builder:$BUILDKITE_BRANCH . --build-arg branch=$BUILDKITE_COMMIT && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/builder:latest eosio/builder:$BUILDKITE_TAG || : && \ - docker tag eosio/builder:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker tag eosio/builder:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/builder:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker tag eosio/builder:latest gcr.io/b1-automation-dev/eosio/builder:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/builder:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/builder:$BUILDKITE_COMMIT && \ - docker rmi eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/builder:$BUILDKITE_TAG || : && \ - docker rmi eosio/builder:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:latest - label: "Docker build builder" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - wait - - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING EOS IMAGE" && \ - docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - cd Docker && \ - docker build -t eosio/eos:latest -t eosio/eos:$BUILDKITE_COMMIT -t eosio/eos:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos:latest eosio/eos:$BUILDKITE_TAG || : && \ - docker tag eosio/eos:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker tag eosio/eos:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker tag eosio/eos:latest gcr.io/b1-automation-dev/eosio/eos:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/eos:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/eos:$BUILDKITE_COMMIT && \ - docker rmi eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/eos:$BUILDKITE_TAG || : && \ - docker rmi eosio/eos:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT - label: "Docker build eos" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING EOS DEV IMAGE" && \ - docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - cd Docker/dev && \ - docker build -t eosio/eos-dev:latest -t eosio/eos-dev:$BUILDKITE_COMMIT -t eosio/eos-dev:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos-dev:latest eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker tag eosio/eos-dev:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker tag eosio/eos-dev:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos-dev:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker tag eosio/eos-dev:latest gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker rmi eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker rmi eosio/eos-dev:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT - label: "Docker build eos-dev" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - wait diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 0e6133019ce..6383f57c392 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,9 +1,9 @@ steps: - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 16.04 Build" agents: @@ -22,9 +22,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 18.04 Build" agents: @@ -43,9 +43,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":centos: 7 Build" agents: @@ -64,9 +64,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 1 Build" agents: @@ -85,9 +85,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: 
Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 2 Build" agents: @@ -106,9 +106,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":fedora: 27 Build" agents: @@ -127,11 +127,11 @@ steps: timeout: 60 - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- Compressing build directory :compression:" && \ + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: Mojave Build" agents: @@ -141,11 +141,11 @@ steps: timeout: 60 - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- Compressing build directory :compression:" && \ + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: High Sierra Build" agents: @@ -156,21 +156,14 @@ steps: - wait - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Ubuntu 16.04 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":ubuntu: 16.04 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -181,23 +174,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Ubuntu 18.04 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 18.04 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":ubuntu: 18.04 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -208,23 +194,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # centOS Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":centos: 7 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -235,23 +214,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Amazon AWS-1 Linux Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":aws: 1 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -262,23 +234,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Amazon AWS-2 Linux Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":aws: 2 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -289,23 +254,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Fedora Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":fedora: 27 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -316,42 +274,26 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # High Sierra Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + echo "+++ :microscope: Running LR Tests" + ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh label: ":darwin: High Sierra LR Tests" agents: - "role=tester-v2-1" - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Mojave Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" + echo "+++ :microscope: Running LR Tests" + ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh label: ":darwin: Mojave LR Tests" agents: - "role=tester-v2-1" - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - \ No newline at end of file + timeout: 90 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 4e860734910..57ce31e5a6c 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,9 +1,9 @@ steps: - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 16.04 Build" agents: @@ -22,9 +22,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 18.04 Build" agents: @@ -43,9 +43,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":centos: 7 Build" agents: @@ -64,9 +64,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 1 Build" agents: @@ -85,9 +85,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 2 Build" agents: @@ -106,9 +106,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":fedora: 27 Build" agents: @@ -127,11 +127,11 @@ steps: timeout: 60 - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- Compressing build directory :compression:" && \ + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: Mojave Build" agents: @@ -141,11 +141,11 @@ steps: timeout: 60 - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - 
echo "--- Compressing build directory :compression:" && \ + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: High Sierra Build" agents: @@ -156,21 +156,15 @@ steps: - wait + # Ubuntu 16.04 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":ubuntu: 16.04 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -184,20 +178,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":ubuntu: 16.04 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -210,21 +197,15 @@ steps: workdir: /data/job timeout: 60 + # Ubuntu 18.04 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":ubuntu: 18.04 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -238,20 +219,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":ubuntu: 18.04 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -264,22 +238,15 @@ steps: workdir: /data/job timeout: 60 - + # centOS Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":centos: 7 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -293,20 +260,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":centos: 7 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -319,21 +279,15 @@ steps: workdir: /data/job timeout: 60 + # Amazon AWS-1 Linux Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 1 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":aws: 1 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -347,20 +301,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":aws: 1 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -373,21 +320,15 @@ steps: workdir: /data/job timeout: 60 + # Amazon AWS-2 Linux Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":aws: 2 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -401,20 +342,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":aws: 2 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -427,21 +361,15 @@ steps: workdir: /data/job timeout: 60 + # Fedora Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":fedora: 27 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -455,20 +383,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":fedora: 27 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -481,85 +402,61 @@ steps: workdir: /data/job timeout: 60 + # High Sierra Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/parallel-test.sh label: ":darwin: High Sierra Tests" agents: - "role=tester-v2-1" - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: High Sierra Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh label: ":darwin: High Sierra NP Tests" agents: - "role=tester-v2-1" - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 + # Mojave Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/parallel-test.sh label: ":darwin: Mojave Tests" agents: - "role=tester-v2-1" - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh label: ":darwin: Mojave NP Tests" agents: - "role=tester-v2-1" - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 - wait - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew label: ":darwin: High Sierra Package Builder" agents: @@ -571,10 +468,10 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew label: ":darwin: Mojave Package Builder" agents: @@ -586,10 +483,10 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" cd /data/job/build/packages && bash generate_package.sh deb label: ":ubuntu: 16.04 Package builder" agents: @@ -612,10 +509,10 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" cd /data/job/build/packages && bash generate_package.sh deb label: ":ubuntu: 18.04 Package builder" agents: @@ -638,17 +535,17 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - yum install -y rpm-build && \ - mkdir -p /root/rpmbuild/BUILD && \ - mkdir -p /root/rpmbuild/BUILDROOT && \ - mkdir -p /root/rpmbuild/RPMS && \ - mkdir -p /root/rpmbuild/SOURCES && \ - mkdir -p /root/rpmbuild/SPECS && \ - mkdir -p /root/rpmbuild/SRPMS && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS cd /data/job/build/packages && bash generate_package.sh rpm label: ":fedora: 27 Package builder" agents: @@ -671,17 +568,17 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - yum install -y rpm-build && \ - mkdir -p /root/rpmbuild/BUILD && \ - mkdir -p /root/rpmbuild/BUILDROOT && \ - mkdir -p /root/rpmbuild/RPMS && \ - mkdir -p /root/rpmbuild/SOURCES && \ - mkdir -p /root/rpmbuild/SPECS && \ - mkdir -p /root/rpmbuild/SRPMS && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: 7 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS cd /data/job/build/packages && bash generate_package.sh rpm label: ":centos: 7 Package builder" agents: @@ -706,9 +603,9 @@ steps: - wait - command: | - echo "--- :arrow_down: Downloading brew files" && \ - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" && \ - mv build/packages/eosio.rb build/packages/eosio_highsierra.rb && \ + echo "--- :arrow_down: Downloading brew files" + buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" + mv build/packages/eosio.rb build/packages/eosio_highsierra.rb buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" label: ":darwin: Brew Updater" agents: @@ -716,4 +613,4 @@ steps: artifact_paths: - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" - timeout: 60 \ No newline at end of file + timeout: 60 diff --git a/.buildkite/sanitizers.yml b/.buildkite/sanitizers.yml deleted file mode 100644 index d49493eb5ee..00000000000 --- a/.buildkite/sanitizers.yml +++ /dev/null @@ -1,155 +0,0 @@ -steps: - - command: | - echo "--- :hammer: Building with Undefined Sanitizer" && \ - /usr/bin/cmake -GNinja \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_CXX_COMPILER=clang++-4.0 \ - -DCMAKE_C_COMPILER=clang-4.0 \ - -DBOOST_ROOT="${BOOST_ROOT}" \ - -DWASM_ROOT="${WASM_ROOT}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ - -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING=true\ - -DBUILD_DOXYGEN=false -DCMAKE_CXX_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ - -DCMAKE_C_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ - -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" \ - -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" && \ - echo "--- :shinto_shrine: Running ninja" && \ - /usr/bin/ninja | tee ninja.log && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz * - echo "--- :beers: Done" - label: ":_: Undefined Sanitizer" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build.tar.gz" - - "ninja.log" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - command: ["--privileged"] - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - - UBSAN_OPTIONS=print_stacktrace=1 - timeout: 60 - - - command: | - echo "--- :hammer: Building with Address Sanitizer" && \ - /usr/bin/cmake -GNinja \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_CXX_COMPILER=clang++-4.0 \ - -DCMAKE_C_COMPILER=clang-4.0 \ - -DBOOST_ROOT="${BOOST_ROOT}" \ - -DWASM_ROOT="${WASM_ROOT}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ - -DBUILD_MONGO_DB_PLUGIN=true \ - 
-DENABLE_COVERAGE_TESTING=true \ - -DBUILD_DOXYGEN=false \ - -DCMAKE_CXX_FLAGS="-fsanitize=address -fsanitize-recover=all -O1 -g -fno-omit-frame-pointer" \ - -DCMAKE_C_FLAGS="-fsanitize=address -fsanitize-recover=all -O1 -g -fno-omit-frame-pointer" \ - -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=address -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s" \ - -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=address -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s" - echo "--- :shinto_shrine: Running ninja" && \ - /usr/bin/ninja | tee ninja.log && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz * - echo "--- :beers: Done" - label: ":_: Address Sanitizer" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build.tar.gz" - - "ninja.log" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - command: ["--privileged"] - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - - ASAN_OPTIONS=fast_unwind_on_malloc=0:halt_on_error=0:detect_odr_violation=0:detect_leaks=0:symbolize=1:verbosity=1 - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":_: Undefined Sanitizer" && \ - tar -zxf build.tar.gz --no-same-owner && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ctest -j8 -LE _tests -V -O sanitizer.log || true - label: ":_: Undefined Sanitizer Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "sanitizer.log" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - timeout: 120 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":_: Address Sanitizer" && \ - tar -zxf build.tar.gz --no-same-owner && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ctest -j8 -LE _tests -V -O sanitizer.log || true - label: ":_: Address Sanitizer Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "sanitizer.log" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - timeout: 120 \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 0d54f35526c..f9375f0f8b9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required( VERSION 3.5 ) project( EOSIO ) - +include(CTest) # suppresses DartConfiguration.tcl error enable_testing() if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) diff --git a/scripts/long-running-test.sh b/scripts/long-running-test.sh new file mode 100755 index 00000000000..60cae2d0b7f --- /dev/null +++ b/scripts/long-running-test.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." +TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +echo "$ ctest -L long_running_tests --output-on-failure -T Test" +ctest -L long_running_tests --output-on-failure -T Test +# upload artifacts +echo "Uploading artifacts..." +XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." diff --git a/scripts/parallel-test.sh b/scripts/parallel-test.sh new file mode 100755 index 00000000000..5174c454e2a --- /dev/null +++ b/scripts/parallel-test.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." +CPU_CORES=$(getconf _NPROCESSORS_ONLN) +echo "$CPU_CORES cpu cores detected." +TEST_COUNT=$(ctest -N -LE _tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +echo "$ ctest -j $CPU_CORES -LE _tests --output-on-failure -T Test" +ctest -j $CPU_CORES -LE _tests --output-on-failure -T Test +# upload artifacts +echo "Uploading artifacts..." 
+XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." diff --git a/scripts/serial-test.sh b/scripts/serial-test.sh new file mode 100755 index 00000000000..512229d6272 --- /dev/null +++ b/scripts/serial-test.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." +TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +echo "$ ctest -L nonparallelizable_tests --output-on-failure -T Test" +ctest -L nonparallelizable_tests --output-on-failure -T Test +# upload artifacts +echo "Uploading artifacts..." +XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." From eef306c09d0eb7779c947e27b598648b6abb4f0a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 20 Mar 2019 10:41:43 -0500 Subject: [PATCH 14/49] Prevent txn_test_gen_plugin from calling back into http_plugin multiple times per request. 
--- plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 414664be32a..60383175387 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -69,7 +69,10 @@ using io_work_t = boost::asio::executor_work_guard>(0);\ + auto result_handler = [times_called{std::move(times_called)}, cb, body](const fc::exception_ptr& e) mutable {\ + if( ++(*times_called) > 1 ) return;\ if (e) {\ try {\ e->dynamic_rethrow_exception();\ From 68f92dc122a34c7b42a3dbf685b2cf9b314c0739 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 18 Mar 2019 10:25:56 -0500 Subject: [PATCH 15/49] Add strand to protect internals of asio --- plugins/net_plugin/net_plugin.cpp | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 320214ae933..a77a828032a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -497,7 +497,8 @@ namespace eosio { transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive - socket_ptr socket; + boost::asio::io_context::strand strand; + socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; fc::optional outstanding_read_bytes; @@ -730,6 +731,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), + strand( *my_impl->server_ioc ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), last_handshake_recv(), @@ -755,6 +757,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), + strand( *my_impl->server_ioc ), socket( s ), node_id(), last_handshake_recv(), @@ -976,7 +979,8 @@ namespace eosio { std::vector bufs; buffer_queue.fill_out_buffer( bufs ); - boost::asio::async_write(*socket, bufs, [c, priority]( boost::system::error_code ec, std::size_t w ) { + boost::asio::async_write(*socket, bufs, + boost::asio::bind_executor(strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { app().post(priority, [c, priority, ec, w]() { try { auto conn = c.lock(); @@ -1016,7 +1020,7 @@ namespace eosio { fc_elog( logger,"Exception in do_queue_write to ${p}", ("p",pname) ); } }); - }); + })); } void connection::cancel_sync(go_away_reason reason) { @@ -1859,7 +1863,7 @@ namespace eosio { connection_wptr weak_conn = c; // Note: need to add support for IPv6 too - resolver->async_resolve( query, + resolver->async_resolve( query, boost::asio::bind_executor( c->strand, [weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { app().post( priority::low, [err, endpoint_itr, weak_conn, this]() { auto c = weak_conn.lock(); @@ -1871,7 +1875,7 @@ namespace eosio { ("peer_addr", c->peer_name())( "error", err.message()) ); } } ); - } ); + } ) ); } void net_plugin_impl::connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr) { @@ -1883,7 +1887,8 @@ namespace eosio { ++endpoint_itr; c->connecting = true; connection_wptr weak_conn = c; - c->socket->async_connect( current_endpoint, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { + c->socket->async_connect( current_endpoint, boost::asio::bind_executor( c->strand, + [weak_conn, endpoint_itr, this]( const 
boost::system::error_code& err ) { app().post( priority::low, [weak_conn, endpoint_itr, this, err]() { auto c = weak_conn.lock(); if( !c ) return; @@ -1902,7 +1907,7 @@ namespace eosio { } } } ); - } ); + } ) ); } bool net_plugin_impl::start_session(const connection_ptr& con) { @@ -2052,6 +2057,7 @@ namespace eosio { ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, + boost::asio::bind_executor( conn->strand, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); @@ -2133,7 +2139,7 @@ namespace eosio { close( conn ); } }); - }); + })); } catch (...) { string pname = conn ? conn->peer_name() : "no connection name"; fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); From c39d5ea3c90e13a73db0f657ff92e9037ef5a8ac Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 19 Mar 2019 15:52:04 -0500 Subject: [PATCH 16/49] Ensure that intermediate asio operations are on the same thread --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a77a828032a..a26353ab387 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -731,7 +731,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), - strand( *my_impl->server_ioc ), + strand( app().get_io_service() ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), last_handshake_recv(), @@ -757,7 +757,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), - strand( *my_impl->server_ioc ), + strand( app().get_io_service() ), socket( s ), node_id(), last_handshake_recv(), From b364446dde4533c08345c6f7b6c0bd29f5f019a7 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Fri, 22 Mar 2019 09:56:32 -0700 Subject: [PATCH 17/49] Issue 6940 (#6978) --- README.md | 14 +++++++++++++- scripts/eosio_install.sh | 2 +- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c36b2c6d1a5..e22a2b2cebc 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,11 @@ Block.one is neither launching nor operating any initial public blockchains base There is no public testnet running currently. -**If you have previously installed EOSIO, please run the `eosio_uninstall` script (it is in the directory where you cloned EOSIO) before downloading and using the binary releases.** +--- + +**If you used our build scripts to install eosio, [please be sure to uninstall](#build-script-uninstall) before using our packages.** + +--- #### Mac OS X Brew Install ```sh @@ -37,6 +41,7 @@ $ brew install eosio ```sh $ brew remove eosio ``` + #### Ubuntu 18.04 Debian Package Install ```sh $ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb @@ -70,6 +75,13 @@ $ sudo yum install ./eosio-1.7.0-rc1.fc27.x86_64.rpm $ sudo yum remove eosio.cdt ``` +#### Build Script Uninstall + +If you have previously installed EOSIO using build scripts, you have two options (neither impact your data directory by default): + +1. `./scripts/eosio_uninstall.sh` - Will uninstall eosio, yet leave dependencies (you can use --full to delete your data directory). +2. 
`./scripts/full_uninstaller.sh` - Will uninstall eosio and dependencies (can be forced; see script). + ## Supported Operating Systems EOSIO currently supports the following operating systems: 1. Amazon 2017.09 and higher diff --git a/scripts/eosio_install.sh b/scripts/eosio_install.sh index ac5a731f2fd..a858fd63430 100755 --- a/scripts/eosio_install.sh +++ b/scripts/eosio_install.sh @@ -78,7 +78,7 @@ printf " \\__\\/ \\__\\/ \\__\\/ \\__\\/ printf "==============================================================================================\\n" printf "EOSIO has been installed into ${OPT_LOCATION}/eosio/bin!\\n" -printf "If you need to, you can fully uninstall using eosio_uninstall.sh && scripts/clean_old_install.sh.\\n" +printf "If you need to, you can uninstall using: ./scripts/full_uninstaller.sh (it will leave your data directory).\\n" printf "==============================================================================================\\n\\n" printf "EOSIO website: https://eos.io\\n" From b0b4fdae2f3514b325a49d5a2494e784f2fb66ad Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 22 Mar 2019 13:40:05 -0400 Subject: [PATCH 18/49] Remove setting CMAKE_OSX_SYSROOT Setting CMAKE_OSX_SYSROOT has shown to cause build failures on fresh macos 10.13 installs --- CMakeLists.txt | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f9375f0f8b9..00258c4b86d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,14 +14,8 @@ endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") -if (UNIX) - if (APPLE) - execute_process(COMMAND xcrun --show-sdk-path - OUTPUT_VARIABLE CMAKE_OSX_SYSROOT - OUTPUT_STRIP_TRAILING_WHITESPACE) - list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4") - list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/gettext") - endif() +if (UNIX AND APPLE) + list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4" "/usr/local/opt/gettext") endif() include( GNUInstallDirs ) From cb98d8f756098547b8a35186c49cd8f138cbeee1 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Feb 2019 08:10:05 -0600 Subject: [PATCH 19/49] Added ability to configure nodes that are not launched immediately. 
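
An unstarted node is configured exactly like a started one (config, keys,
peer wiring), but the launcher does not spawn nodeos for it. Instead it
records the full command line in the node's data directory so the node can
be brought up later by hand or by a test. Condensed sketch of the branch
added to launch() below (names are from this file):

    else {   // instance.node->dont_start: configure only, do not spawn
       cerr << "not spawning child, " << eosdcmd << endl;
       const bfs::path dd = instance.data_dir_name;
       bfs::ofstream sf (dd / "start.cmd");   // saved command for a later start
       sf << eosdcmd << endl;
    }

The new --unstarted-nodes count has to fit inside --nodes (nodes >= pnodes
+ unstarted-nodes), and the option is rejected for remote hosts.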
--- programs/eosio-launcher/main.cpp | 53 +++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 8a3a75a721b..066305e4122 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -247,6 +247,7 @@ class tn_node_def { vector producers; eosd_def* instance; string gelf_endpoint; + bool dont_start; }; void @@ -390,6 +391,8 @@ string producer_names::producer_name(unsigned int producer_number) { struct launcher_def { bool force_overwrite; size_t total_nodes; + size_t unstarted_nodes; + size_t total_nodes; size_t prod_nodes; size_t producers; size_t next_node; @@ -481,6 +484,7 @@ launcher_def::set_options (bpo::options_description &cfg) { cfg.add_options() ("force,f", bpo::bool_switch(&force_overwrite)->default_value(false), "Force overwrite of existing configuration files and erase blockchain") ("nodes,n",bpo::value(&total_nodes)->default_value(1),"total number of nodes to configure and launch") + ("unstarted-nodes",bpo::value(&unstarted_nodes)->default_value(0),"total number of nodes to configure, but not launch") ("pnodes,p",bpo::value(&prod_nodes)->default_value(1),"number of nodes that contain one or more producers") ("producers",bpo::value(&producers)->default_value(21),"total number of non-bios producer instances in this network") ("mode,m",bpo::value>()->multitoken()->default_value({"any"}, "any"),"connection mode, combination of \"any\", \"producers\", \"specified\", \"none\"") @@ -634,7 +638,31 @@ launcher_def::initialize (const variables_map &vmap) { if (prod_nodes > (producers + 1)) prod_nodes = producers; if (prod_nodes > total_nodes) - total_nodes = prod_nodes; + total_nodes = prod_nodes + unstarted_nodes; + else if (total_nodes < prod_nodes + unstarted_nodes) { + cerr << "ERROR: if provided, \"--nodes\" must be equal or greater than the number of nodes indicated by \"--pnodes\" and \"--unstarted-nodes\"." 
<< endl; + exit (-1); + } + + if (vmap.count("specific-num")) { + const auto specific_nums = vmap["specific-num"].as>(); + const auto specific_args = vmap["specific-nodeos"].as>(); + if (specific_nums.size() != specific_args.size()) { + cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl; + exit (-1); + } + // don't include bios + const auto allowed_nums = total_nodes - 1; + for(uint i = 0; i < specific_nums.size(); ++i) + { + const auto& num = specific_nums[i]; + if (num >= allowed_nums) { + cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; + exit (-1); + } + specific_nodeos_args[num] = specific_args[i]; + } + } char* erd_env_var = getenv ("EOSIO_HOME"); if (erd_env_var == nullptr || std::string(erd_env_var).empty()) { @@ -733,7 +761,7 @@ launcher_def::generate () { write_dot_file (); if (!output.empty()) { - bfs::path savefile = output; + bfs::path savefile = output; { bfs::ofstream sf (savefile); sf << fc::json::to_pretty_string (network) << endl; @@ -754,6 +782,7 @@ launcher_def::generate () { } return false; } + return true; } @@ -864,6 +893,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; + const auto to_not_start_node = total_nodes - unstarted_nodes - 1; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; @@ -894,6 +924,7 @@ launcher_def::bind_nodes () { ++producer_number; } } + node.dont_start = i >= to_not_start_node; } node.gelf_endpoint = gelf_endpoint; network.nodes[node.name] = move(node); @@ -1564,6 +1595,10 @@ launcher_def::launch (eosd_def &instance, string >s) { } if (!host->is_local()) { + if (instance.node->dont_start) { + cerr << "Unable to use \"unstarted-nodes\" with a remote hose" << endl; + exit (-1); + } string cmdl ("cd "); cmdl += host->eosio_home + "; nohup " + eosdcmd + " > " + reout.string() + " 2> " + reerr.string() + "& echo $! > " + pidf.string() @@ -1578,7 +1613,7 @@ launcher_def::launch (eosd_def &instance, string >s) { string cmd = "cd " + host->eosio_home + "; kill -15 $(cat " + pidf.string() + ")"; format_ssh (cmd, host->host_name, info.kill_cmd); } - else { + else if (!instance.node->dont_start) { cerr << "spawning child, " << eosdcmd << endl; bp::child c(eosdcmd, bp::std_out > reout, bp::std_err > reerr ); @@ -1600,6 +1635,16 @@ launcher_def::launch (eosd_def &instance, string >s) { } c.detach(); } + else { + cerr << "not spawning child, " << eosdcmd << endl; + + const bfs::path dd = instance.data_dir_name; + const bfs::path start_file = dd / "start.cmd"; + bfs::ofstream sf (start_file); + + sf << eosdcmd << endl; + sf.close(); + } last_run.running_nodes.emplace_back (move(info)); } @@ -2046,7 +2091,7 @@ FC_REFLECT( eosd_def, (p2p_endpoint) ) // @ignore instance, gelf_endpoint -FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers) ) +FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers)(dont_start) ) FC_REFLECT( testnet_def, (name)(ssh_helper)(nodes) ) From 328473c9af665c780bf00163cb58e03e85e5d7b4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Feb 2019 11:42:31 -0600 Subject: [PATCH 20/49] Cleanup of scripts. 
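
The node-name and data-dir helpers move from Cluster onto Utils so Node.py
and the standalone test scripts can share them, and call sites now pass
cluster.launch() arguments by keyword. A short sketch of the relocated
helpers (paths follow the var/lib layout these tests already use; the
import line assumes the usual test-script convention):

    from testUtils import Utils

    Utils.nodeExtensionToName("bios")   # -> "node_bios"
    Utils.nodeExtensionToName(2)        # -> "node_02"
    Utils.getNodeDataDir(2)             # -> "var/lib/node_02"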
--- tests/Cluster.py | 63 ++++++++++--------- tests/Node.py | 3 +- ...onsensus-validation-malicious-producers.py | 2 +- tests/distributed-transactions-test.py | 2 +- tests/testUtils.py | 22 ++++++- 5 files changed, 58 insertions(+), 34 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index d41d8e8731d..2c2486d48bf 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -31,7 +31,6 @@ class Cluster(object): __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" __configDir="etc/eosio/" - __dataDir="var/lib/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -128,11 +127,12 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, + def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, associatedNodeLabels=None): """Launch cluster. pnodes: producer nodes count + unstartedNodes: non-producer nodes that are configured into the launch, but not started totalNodes: producer + non-producer nodes count prodCount: producers per producer node count topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method) @@ -169,6 +169,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if pnodes > totalNodes: raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d)." % (totalNodes, pnodes)) + if pnodes + unstartedNodes > totalNodes: + raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d) + unstartedNodes(%d)." % (totalNodes, pnodes, unstartedNodes)) if self.walletMgr is None: self.walletMgr=WalletMgr(True) @@ -806,15 +808,6 @@ def nodeNameToId(name): m=re.search(r"node_([\d]+)", name) return int(m.group(1)) - @staticmethod - def nodeExtensionToName(ext): - r"""Convert node extension (bios, 0, 1, etc) to node name. """ - prefix="node_" - if ext == "bios": - return prefix + ext - - return "node_%02d" % (ext) - @staticmethod def parseProducerKeys(configFile, nodeName): """Parse node config file for producer keys. Returns dictionary. (Keys: account name; Values: dictionary objects (Keys: ["name", "node", "private","public"]; Values: account name, node id returned by nodeNameToId(nodeName), private key(string)and public key(string))).""" @@ -852,7 +845,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - configFile=Cluster.__configDir + Cluster.nodeExtensionToName(nodeNum) + "/config.ini" + configFile=Cluster.__configDir + Utils.nodeExtensionToName(nodeNum) + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -870,7 +863,7 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. 
Updates producer keys data members.""" - nodeName=Cluster.nodeExtensionToName("bios") + nodeName=Utils.nodeExtensionToName("bios") configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) producerKeys=Cluster.parseProducerKeys(configFile, nodeName) @@ -879,7 +872,7 @@ def parseClusterKeys(totalNodes): return None for i in range(0, totalNodes): - nodeName=Cluster.nodeExtensionToName(i) + nodeName=Utils.nodeExtensionToName(i) configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) @@ -1254,7 +1247,7 @@ def myFunc(): @staticmethod def pgrepEosServerPattern(nodeInstance): - dataLocation=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeInstance) + dataLocation=Utils.getNodeDataDir(nodeInstance) return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation) # Populates list of EosInstanceInfo objects, matched to actual running instances @@ -1272,18 +1265,30 @@ def discoverLocalNodes(self, totalNodes, timeout=None): psOutDisplay=psOut[:6660]+"..." if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) for i in range(0, totalNodes): - pattern=Cluster.pgrepEosServerPattern(i) - m=re.search(pattern, psOut, re.MULTILINE) - if m is None: - Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) + instance=self.discoverLocalNode(i, psOut) + if instance is None: break - instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes + # Populate a node matched to actual running instance + def discoverLocalNode(self, nodeNum, psOut=None): + if psOut is None: + psOut=Cluster.pgrepEosServers(timeout) + if psOut is None: + Utils.Print("ERROR: No nodes discovered.") + return nodes + pattern=Cluster.pgrepEosServerPattern(nodeNum) + m=re.search(pattern, psOut, re.MULTILINE) + if m is None: + Utils.Print("ERROR: Failed to find %s pid. 
Pattern %s" % (Utils.EosServerName, pattern)) + return None + instance=Node(self.host, self.port + nodeNum, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Node>", instance) + return instance + def discoverBiosNodePid(self, timeout=None): psOut=Cluster.pgrepEosServers(timeout=timeout) pattern=Cluster.pgrepEosServerPattern("bios") @@ -1348,20 +1353,20 @@ def __findFiles(path): return files def dumpErrorDetails(self): - fileName=os.path.join(Cluster.__configDir + Cluster.nodeExtensionToName("bios"), "config.ini") + fileName=os.path.join(Cluster.__configDir + Utils.nodeExtensionToName("bios"), "config.ini") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + path=Utils.getNodeDataDir("bios") fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + configLocation=Cluster.__configDir + Utils.nodeExtensionToName(i) fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) fileName=os.path.join(configLocation, "genesis.json") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + path=Utils.getNodeDataDir(i) fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) @@ -1435,7 +1440,7 @@ def waitForNextBlock(self, timeout=None): return node.waitForNextBlock(timeout) def cleanup(self): - for f in glob.glob(Cluster.__dataDir + "node_*"): + for f in glob.glob(Utils.DataDir + "node_*"): shutil.rmtree(f) for f in glob.glob(Cluster.__configDir + "node_*"): shutil.rmtree(f) @@ -1510,7 +1515,7 @@ def printBlockLogIfNeeded(self): self.printBlockLog() def getBlockLog(self, nodeExtension): - blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeExtension) + "/blocks/" + blockLogDir=os.path.join(Utils.getNodeDataDir(nodeExtension), "blocks", "") return Utils.getBlockLog(blockLogDir, exitOnError=False) def printBlockLog(self): @@ -1600,8 +1605,8 @@ def compareCommon(blockLogs, blockNameExtensions, first, last): if Utils.Debug: Utils.Print("context=%s" % (context)) ret=Utils.compare(commonBlockLogs[0], commonBlockLogs[i], context) if ret is not None: - blockLogDir1=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" - blockLogDir2=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" + blockLogDir1=Utils.DataDir + Utils.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" + blockLogDir2=Utils.DataDir + Utils.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" Utils.Print(Utils.FileDivider) Utils.Print("Block log from %s:\n%s" % (blockLogDir1, json.dumps(commonBlockLogs[0], indent=1))) Utils.Print(Utils.FileDivider) diff --git a/tests/Node.py b/tests/Node.py index 1c01893ceca..ab0859c7b0d 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1334,8 +1334,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim dataDir="var/lib/node_%02d" % (nodeId) dt = datetime.datetime.now() - dateStr="%d_%02d_%02d_%02d_%02d_%02d" % ( - dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) + dateStr=Utils.getDateString(dt) stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) with 
open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index 971228854d9..e3c6d7fe50e 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -246,7 +246,7 @@ def myTest(transWillEnterBlock): topo="mesh" delay=0 Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo, delay) is False: + if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay) is False: error("Failed to stand up eos cluster.") return False diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 5b302dcf141..c3b794b89c0 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -63,7 +63,7 @@ (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/testUtils.py b/tests/testUtils.py index 9e7e9c604be..38719fb8455 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -36,6 +36,7 @@ class Utils: EosBlockLogPath="programs/eosio-blocklog/eosio-blocklog" FileDivider="=================================================================" + DataDir="var/lib/" @staticmethod def Print(*args, **kwargs): @@ -65,6 +66,24 @@ def setIrreversibleTimeout(timeout): def setSystemWaitTimeout(timeout): Utils.systemWaitTimeout=timeout + @staticmethod + def getDateString(dt): + return "%d_%02d_%02d_%02d_%02d_%02d" % ( + dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) + + @staticmethod + def nodeExtensionToName(ext): + r"""Convert node extension (bios, 0, 1, etc) to node name. """ + prefix="node_" + if ext == "bios": + return prefix + ext + + return "node_%02d" % (ext) + + @staticmethod + def getNodeDataDir(ext): + return os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + @staticmethod def getChainStrategies(): chainSyncStrategies={} @@ -180,7 +199,8 @@ def runCmdArrReturnJson(cmdArr, trace=False, silentErrors=True): @staticmethod def runCmdReturnStr(cmd, trace=False): - retStr=Utils.checkOutput(cmd.split()) + cmdArr=shlex.split(cmd) + retStr=Utils.checkOutput(cmdArr) if trace: Utils.Print ("RAW > %s" % (retStr)) return retStr From 5aa5835e8897fd38fd0fdffc6434bf037af7d3cc Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 25 Feb 2019 08:36:08 -0600 Subject: [PATCH 21/49] Added config dir and data dir utils methods. --- tests/Cluster.py | 19 +++++++++---------- tests/testUtils.py | 19 +++++++++++++++++-- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 2c2486d48bf..0e16c803f05 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -30,7 +30,6 @@ class Cluster(object): __BiosPort=8788 __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" - __configDir="etc/eosio/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. 
If not load the wallet plugin @@ -845,7 +844,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - configFile=Cluster.__configDir + Utils.nodeExtensionToName(nodeNum) + "/config.ini" + configFile=Utils.getNodeConfigDir(nodeNum, "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -863,19 +862,19 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. Updates producer keys data members.""" - nodeName=Utils.nodeExtensionToName("bios") - configFile=Cluster.__configDir + nodeName + "/config.ini" + configFile=Utils.getNodeConfigDir("bios", "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + nodeName=Utils.nodeExtensionToName("bios") producerKeys=Cluster.parseProducerKeys(configFile, nodeName) if producerKeys is None: Utils.Print("ERROR: Failed to parse eosio private keys from cluster config files.") return None for i in range(0, totalNodes): - nodeName=Utils.nodeExtensionToName(i) - configFile=Cluster.__configDir + nodeName + "/config.ini" + configFile=Utils.getNodeConfigDir(i, "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + nodeName=Utils.nodeExtensionToName(i) keys=Cluster.parseProducerKeys(configFile, nodeName) if keys is not None: producerKeys.update(keys) @@ -1353,7 +1352,7 @@ def __findFiles(path): return files def dumpErrorDetails(self): - fileName=os.path.join(Cluster.__configDir + Utils.nodeExtensionToName("bios"), "config.ini") + fileName=Utils.getNodeConfigDir("bios", "config.ini") Cluster.dumpErrorDetailImpl(fileName) path=Utils.getNodeDataDir("bios") fileNames=Cluster.__findFiles(path) @@ -1361,7 +1360,7 @@ def dumpErrorDetails(self): Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Utils.nodeExtensionToName(i) + configLocation=Utils.getNodeConfigDir(i) fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) fileName=os.path.join(configLocation, "genesis.json") @@ -1442,7 +1441,7 @@ def waitForNextBlock(self, timeout=None): def cleanup(self): for f in glob.glob(Utils.DataDir + "node_*"): shutil.rmtree(f) - for f in glob.glob(Cluster.__configDir + "node_*"): + for f in glob.glob(Utils.ConfigDir + "node_*"): shutil.rmtree(f) for f in self.filesToCleanup: @@ -1515,7 +1514,7 @@ def printBlockLogIfNeeded(self): self.printBlockLog() def getBlockLog(self, nodeExtension): - blockLogDir=os.path.join(Utils.getNodeDataDir(nodeExtension), "blocks", "") + blockLogDir=Utils.getNodeDataDir(nodeExtension, "blocks") return Utils.getBlockLog(blockLogDir, exitOnError=False) def printBlockLog(self): diff --git a/tests/testUtils.py b/tests/testUtils.py index 38719fb8455..107be3f087a 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -37,6 +37,7 @@ class Utils: FileDivider="=================================================================" DataDir="var/lib/" + ConfigDir="etc/eosio/" @staticmethod def Print(*args, **kwargs): @@ -81,8 +82,22 @@ def nodeExtensionToName(ext): return "node_%02d" % (ext) @staticmethod - def getNodeDataDir(ext): - return os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + def getNodeDataDir(ext, relativeDir=None, trailingSlash=False): + path=os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + if relativeDir is not None: + path=os.path.join(path, relativeDir) + if trailingSlash: + 
path=os.path.join(path, "") + return path + + @staticmethod + def getNodeConfigDir(ext, relativeDir=None, trailingSlash=False): + path=os.path.join(Utils.ConfigDir, Utils.nodeExtensionToName(ext)) + if relativeDir is not None: + path=os.path.join(path, relativeDir) + if trailingSlash: + path=os.path.join(path, "") + return path @staticmethod def getChainStrategies(): From 50ea21fa9a6ddfc1feb46926fbcfa438975583ba Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 26 Feb 2019 08:44:31 -0600 Subject: [PATCH 22/49] Refactoring relaunch logic to allow for a general launch via a command line. --- tests/Node.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index ab0859c7b0d..802aa35e9df 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1332,19 +1332,8 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim myCmd=" ".join(cmdArr) - dataDir="var/lib/node_%02d" % (nodeId) - dt = datetime.datetime.now() - dateStr=Utils.getDateString(dt) - stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) - stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) - with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: - cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) - Utils.Print("cmd: %s" % (cmd)) - popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) - if cachePopen: - self.popenProc=popen - self.pid=popen.pid - if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) + self.launchCmd(cmd, nodeId) def isNodeAlive(): """wait for node to be responsive.""" @@ -1366,6 +1355,20 @@ def isNodeAlive(): self.killed=False return True + def launchCmd(self, cmd, nodeId): + dataDir=Utils.getNodeDataDir(nodeId) + dt = datetime.datetime.now() + dateStr=Utils.getDateString(dt) + stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) + stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) + with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: + Utils.Print("cmd: %s" % (cmd)) + popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) + if cachePopen: + self.popenProc=popen + self.pid=popen.pid + if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + def trackCmdTransaction(self, trans, ignoreNonTrans=False): if trans is None: if Utils.Debug: Utils.Print(" cmd returned transaction: %s" % (trans)) From 7206f767e3a08d4ada52c86c89816e0a4f679783 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:40:27 -0600 Subject: [PATCH 23/49] Fixed initialization of bios node and fixed merge error. 
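Without an initializer, dont_start held an indeterminate value whenever the launcher did not assign it explicitly, so whether a node got started could depend on garbage memory; the duplicate total_nodes member was a merge leftover that broke compilation. A worked example of the unstarted-node arithmetic as of this patch (values hypothetical, and a later patch in this series adjusts the boundary again): with total_nodes = 5 and unstarted_nodes = 2, to_not_start_node = 5 - 2 = 3, so presumably the last two instances are configured but left for a later manual start.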
--- programs/eosio-launcher/main.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 066305e4122..51a0808103b 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -247,7 +247,7 @@ class tn_node_def { vector producers; eosd_def* instance; string gelf_endpoint; - bool dont_start; + bool dont_start = false; }; void @@ -392,7 +392,6 @@ struct launcher_def { bool force_overwrite; size_t total_nodes; size_t unstarted_nodes; - size_t total_nodes; size_t prod_nodes; size_t producers; size_t next_node; @@ -893,7 +892,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; - const auto to_not_start_node = total_nodes - unstarted_nodes - 1; + const auto to_not_start_node = total_nodes - unstarted_nodes; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; From 5c71b5d5e75eeb30bcbb5cf1906570b248b1f4a3 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:42:33 -0600 Subject: [PATCH 24/49] Fixed error in launchCmd refactor. GH #6727 --- tests/Node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 802aa35e9df..8f15ba5fece 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1333,7 +1333,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim myCmd=" ".join(cmdArr) cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) - self.launchCmd(cmd, nodeId) + self.launchCmd(cmd, nodeId, cachePopen) def isNodeAlive(): """wait for node to be responsive.""" @@ -1355,7 +1355,7 @@ def isNodeAlive(): self.killed=False return True - def launchCmd(self, cmd, nodeId): + def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) dt = datetime.datetime.now() dateStr=Utils.getDateString(dt) From 5f86a9d20bc8ba4014a0397531104e7deb6bae57 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:43:25 -0600 Subject: [PATCH 25/49] Fixed errors from previous attempt to explicitly set parameters. 
GH #6727 --- tests/consensus-validation-malicious-producers.py | 2 +- tests/distributed-transactions-test.py | 2 +- tests/restart-scenarios-test.py | 2 +- tests/validate-dirty-db.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index e3c6d7fe50e..6a3ac94d511 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -246,7 +246,7 @@ def myTest(transWillEnterBlock): topo="mesh" delay=0 Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: error("Failed to stand up eos cluster.") return False diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index c3b794b89c0..2ea4edfe462 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -63,7 +63,7 @@ (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index 6b3c217d75d..894a7d0d271 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -66,7 +66,7 @@ pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/validate-dirty-db.py b/tests/validate-dirty-db.py index ac7520bc353..afcf2767b73 100755 --- a/tests/validate-dirty-db.py +++ b/tests/validate-dirty-db.py @@ -74,7 +74,7 @@ def runNodeosAndGetOutput(myTimeout=3): pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False: errorExit("Failed to stand up eos cluster.") node=cluster.getNode(0) From b6852154701c309c389de6501b9ca8f7cf8f213d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 07:52:30 -0600 Subject: [PATCH 26/49] Cleanup. --- tests/nodeos_forked_chain_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 4ef22ab082f..a7f2c777e3c 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -7,7 +7,6 @@ from WalletMgr import WalletMgr from Node import BlockType from Node import Node -from TestHelper import AppArgs from TestHelper import TestHelper import decimal From 87e9e61cc48c80505f650bf8cf3c54fe12de7bf1 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 07:53:25 -0600 Subject: [PATCH 27/49] Added support for adding true flag. 
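This lets tests declare simple on/off switches without supplying a type and a default; the action value is presumably forwarded to argparse as action='store_true'. A minimal usage sketch (the flag name here is hypothetical, not one the tests define):

    appArgs=AppArgs()
    appArgs.add_bool(flag="--skip-catchup", help="Skip the catchup phase")  # hypothetical flag
    args=TestHelper.parse_args({"-v","--clean-run"}, applicationSpecificArgs=appArgs)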
--- tests/TestHelper.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index a9920a731c1..768fccef890 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -22,6 +22,11 @@ def add(self, flag, type, help, default, choices=None): arg=self.AppArg(flag, type, help, default, choices) self.args.append(arg) + + def add_bool(self, flag, help, action='store_true'): + arg=self.AppArg(flag=flag, help=help, action=action) + self.args.append(arg) + # pylint: disable=too-many-instance-attributes class TestHelper(object): LOCAL_HOST="localhost" From a5ab5ba9ec1007f02d57c7357ca7abf420748ba4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 08:00:40 -0600 Subject: [PATCH 28/49] Fixing logic for launching started and unstarted nodes. --- tests/Cluster.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 0e16c803f05..258532331f1 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -131,8 +131,8 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me associatedNodeLabels=None): """Launch cluster. pnodes: producer nodes count - unstartedNodes: non-producer nodes that are configured into the launch, but not started - totalNodes: producer + non-producer nodes count + unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes. + totalNodes: producer + non-producer nodes + unstarted non-producer nodes count prodCount: producers per producer node count topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method) delay: delay between individual nodes launch (as defined by launcher) @@ -189,14 +189,14 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me tries = tries - 1 time.sleep(2) - cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s" % ( + cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s --unstarted-nodes %s" % ( Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], - p2pPlugin, producerFlag) + p2pPlugin, producerFlag, unstartedNodes) cmdArr=cmd.split() if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on \"*\" --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: @@ -262,7 +262,7 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me # of two entries - [ , ] with first being the name and second being the node definition shapeFileNodes = shapeFileObject["nodes"] - numProducers=totalProducers if totalProducers is not None else totalNodes + numProducers=totalProducers if totalProducers is not None else (totalNodes - unstartedNodes) maxProducers=ord('z')-ord('a')+1 assert numProducers Date: Sat, 9 Mar 2019 08:04:35 -0600 Subject: [PATCH 29/49] Fixed txn_test_gen_plugin to allow using different prefixes for the test accounts. 
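The three generator accounts are now name members (newaccountA, newaccountB, newaccountT) derived from a configurable prefix, so several nodes running the plugin against one chain can use disjoint account sets, and the hard-coded txn.test.* JSON payloads become fc::format_string templates. With the default prefix the plugin still creates txn.test.a, txn.test.b and txn.test.t; the catchup test added later in this series configures it as:

    --plugin eosio::txn_test_gen_plugin --txn-test-gen-account-prefix txntestacct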
--- .../txn_test_gen_plugin.cpp | 82 +++++++++++-------- 1 file changed, 50 insertions(+), 32 deletions(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 60383175387..deea09ace55 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -101,6 +101,9 @@ struct txn_test_gen_plugin_impl { uint16_t thread_pool_size; optional thread_pool; std::shared_ptr timer; + name newaccountA; + name newaccountB; + name newaccountT; void push_next_transaction(const std::shared_ptr>& trxs, const std::function& next ) { chain_plugin& cp = app().get_plugin(); @@ -131,9 +134,6 @@ struct txn_test_gen_plugin_impl { trxs.reserve(2); try { - name newaccountA("txn.test.a"); - name newaccountB("txn.test.b"); - name newaccountC("txn.test.t"); name creator(init_name); abi_def currency_abi_def = fc::json::from_string(contracts::eosio_token_abi().data()).as(); @@ -170,12 +170,12 @@ struct txn_test_gen_plugin_impl { trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountB, owner_auth, active_auth}); } - //create "txn.test.t" account + //create "T" account { auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountC, owner_auth, active_auth}); + trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } trx.expiration = cc.head_block_time() + fc::seconds(30); @@ -184,55 +184,67 @@ struct txn_test_gen_plugin_impl { trxs.emplace_back(std::move(trx)); } - //set txn.test.t contract to eosio.token & initialize it + //set newaccountT contract to eosio.token & initialize it { signed_transaction trx; vector wasm = contracts::eosio_token_wasm(); setcode handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.code.assign(wasm.begin(), wasm.end()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); { setabi handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.abi = fc::raw::pack(json::from_string(contracts::eosio_token_abi().data()).as()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(create); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("create", fc::json::from_string("{\"issuer\":\"txn.test.t\",\"maximum_supply\":\"1000000000.0000 CUR\"}}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("create", + fc::json::from_string(fc::format_string("{\"issuer\":\"${issuer}\",\"maximum_supply\":\"1000000000.0000 CUR\"}}", + fc::mutable_variant_object()("issuer",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(issue); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("issue", 
fc::json::from_string("{\"to\":\"txn.test.t\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("issue", + fc::json::from_string(fc::format_string("{\"to\":\"${to}\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("to",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.a\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountA.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.b\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountB.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } @@ -266,20 +278,20 @@ struct txn_test_gen_plugin_impl { auto abi_serializer_max_time = app().get_plugin().get_abi_serializer_max_time(); abi_serializer eosio_token_serializer{fc::json::from_string(contracts::eosio_token_abi().data()).as(), abi_serializer_max_time}; //create the actions here - act_a_to_b.account = N(txn.test.t); + act_a_to_b.account = newaccountT; act_a_to_b.name = N(transfer); - act_a_to_b.authorization = vector{{name("txn.test.a"),config::active_name}}; - act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + act_a_to_b.authorization = vector{{newaccountA,config::active_name}}; + act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"{to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("from",newaccountA.to_string())("to",newaccountB.to_string())("l", salt))), abi_serializer_max_time); - act_b_to_a.account = N(txn.test.t); + act_b_to_a.account = newaccountT; act_b_to_a.name = N(transfer); - act_b_to_a.authorization = vector{{name("txn.test.b"),config::active_name}}; - act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", - 
fc::json::from_string(fc::format_string("{\"from\":\"txn.test.b\",\"to\":\"txn.test.a\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + act_b_to_a.authorization = vector{{newaccountB,config::active_name}}; + act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("from",newaccountB.to_string())("to",newaccountA.to_string())("l", salt))), abi_serializer_max_time); timer_timeout = period; @@ -371,6 +383,7 @@ struct txn_test_gen_plugin_impl { next(e.dynamic_copy_exception()); } + ilog("send ${c} transactions", ("c",trxs.size())); push_transactions(std::move(trxs), next); } @@ -414,6 +427,7 @@ void txn_test_gen_plugin::set_program_options(options_description&, options_desc cfg.add_options() ("txn-reference-block-lag", bpo::value()->default_value(0), "Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block)") ("txn-test-gen-threads", bpo::value()->default_value(2), "Number of worker threads in txn_test_gen thread pool") + ("txn-test-gen-account-prefix", bpo::value()->default_value("txn.test."), "Prefix to use for accounts generated and used by this plugin") ; } @@ -422,6 +436,10 @@ void txn_test_gen_plugin::plugin_initialize(const variables_map& options) { my.reset( new txn_test_gen_plugin_impl ); my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as(); my->thread_pool_size = options.at( "txn-test-gen-threads" ).as(); + const std::string thread_pool_account_prefix = options.at( "txn-test-gen-account-prefix" ).as(); + my->newaccountA = thread_pool_account_prefix + "a"; + my->newaccountB = thread_pool_account_prefix + "b"; + my->newaccountT = thread_pool_account_prefix + "t"; EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, "txn-test-gen-threads ${num} must be greater than 0", ("num", my->thread_pool_size) ); } FC_LOG_AND_RETHROW() From 52ac5788faea3696af3aea745ffcdc8e48dfad0a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 08:08:23 -0600 Subject: [PATCH 30/49] Pulled out curl processing into its own function and added functions for interacting with the test accounts. 
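processCurlCmd generalizes the curl invocation that killNodeOnProducer previously built by hand, taking the v1 resource, the command and a JSON payload; txnGenCreateTestAccounts and txnGenStart are thin wrappers over the txn_test_gen plugin's create_test_accounts and start_generation calls. A sketch of the intended use from a test (argument values illustrative):

    node.txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey)
    node.txnGenStart("salt0", 1500, 150)  # salt, period, batch size

These POST to /v1/txn_test_gen/create_test_accounts and /v1/txn_test_gen/start_generation respectively.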
--- tests/Node.py | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 8f15ba5fece..77c3157b5dc 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1075,8 +1075,12 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head assert(isinstance(blockType, BlockType)) assert(isinstance(returnType, ReturnType)) basedOnLib="true" if blockType==BlockType.lib else "false" - cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \ - (self.endpointHttp, producer, whereInSequence, basedOnLib) + payload="{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }" % (producer, whereInSequence, basedOnLib) + return self.processCurlCmd("test_control", "kill_node_on_producer", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + + def processCurlCmd(self, resource, command, payload, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + cmd="curl %s/v1/%s/%s -d '%s' -X POST -H \"Content-Type: application/json\"" % \ + (self.endpointHttp, resource, command, payload) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) rtn=None start=time.perf_counter() @@ -1113,6 +1117,23 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head return rtn + def txnGenCreateTestAccounts(self, genAccount, genKey, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(genAccount, str)) + assert(isinstance(genKey, str)) + assert(isinstance(returnType, ReturnType)) + + payload="[ \"%s\", \"%s\" ]" % (genAccount, genKey) + return self.processCurlCmd("txn_test_gen", "create_test_accounts", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + + def txnGenStart(self, salt, period, batchSize, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(salt, str)) + assert(isinstance(period, int)) + assert(isinstance(batchSize, int)) + assert(isinstance(returnType, ReturnType)) + + payload="[ \"%s\", %d, %d ]" % (salt, period, batchSize) + return self.processCurlCmd("txn_test_gen", "start_generation", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False): if not waitForTransBlock: return trans @@ -1355,6 +1376,19 @@ def isNodeAlive(): self.killed=False return True + def launchUnstarted(self, nodeId, cachePopen=False): + startFile=Utils.getNodeDataDir(nodeId, "start.cmd") + if not os.path.exists(startFile): + Utils.Print("Cannot launch unstarted process since %s file does not exist" % startFile) + return False + + with open(startFile, 'r') as file: + cmd=file.read() + Utils.Print("launchUnstarted cmd: %s" % (cmd)) + + self.launchCmd(cmd, nodeId, cachePopen) + return True + def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) dt = datetime.datetime.now() From 277df3c0c5319d7ad38e0e44b7e3e2244a0a9b7e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 21:59:07 -0500 Subject: [PATCH 31/49] Fix error in variable substitution. 
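fc::format_string only substitutes ${name} placeholders, and the a-to-b transfer template spelled the receiver as {to} with no dollar sign, so the literal text {to} was left in the generated JSON instead of newaccountB's name. Illustrative before/after with the default accounts (memo value hypothetical):

    {"from":"txn.test.a","to":"{to}","quantity":"1.0000 CUR","memo":"0"}
    {"from":"txn.test.a","to":"txn.test.b","quantity":"1.0000 CUR","memo":"0"}

Since {to} is not a valid account name, the a-to-b transfers could never be addressed correctly.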
--- plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index deea09ace55..780127efc15 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -130,6 +130,7 @@ struct txn_test_gen_plugin_impl { } void create_test_accounts(const std::string& init_name, const std::string& init_priv_key, const std::function& next) { + ilog("create_test_accounts"); std::vector trxs; trxs.reserve(2); @@ -282,7 +283,7 @@ struct txn_test_gen_plugin_impl { act_a_to_b.name = N(transfer); act_a_to_b.authorization = vector{{newaccountA,config::active_name}}; act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"{to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", fc::mutable_variant_object()("from",newaccountA.to_string())("to",newaccountB.to_string())("l", salt))), abi_serializer_max_time); From 3089f7039010ad6ba438cde1cb4055b2d80f1d5a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 22:01:36 -0500 Subject: [PATCH 32/49] Add option to not load system contract. GH #6727 --- tests/Cluster.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 258532331f1..8665deffb32 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -128,7 +128,7 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-statements def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, - associatedNodeLabels=None): + associatedNodeLabels=None, loadSystemContract=True): """Launch cluster. pnodes: producer nodes count unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes. @@ -147,6 +147,7 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } alternateVersionLabelsFile: Supply an alternate version labels file to use with associatedNodeLabels. associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. 
+ loadSystemContract: indicate whether the eosio.system contract should be loaded (setting this to False causes useBiosBootFile to be treated as False) """ assert(isinstance(topo, str)) if alternateVersionLabelsFile is not None: @@ -397,8 +398,10 @@ def connectGroup(group, producerNodes, bridgeNodes) : return True Utils.Print("Bootstrap cluster.") + if not loadSystemContract: + useBiosBootFile=False #ensure we use Cluster.bootstrap if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(startedNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios) + self.biosNode=Cluster.bootstrap(startedNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios, loadSystemContract) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False @@ -965,7 +968,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): return biosNode @staticmethod - def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False): + def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False, loadSystemContract=True): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" @@ -1187,17 +1190,18 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM (expectedAmount, actualAmount)) return None - contract="eosio.system" - contractDir="unittests/contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) - return None + if loadSystemContract: + contract="eosio.system" + contractDir="unittests/contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + return None - Node.validateTransaction(trans) + Node.validateTransaction(trans) initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) From 0532e5c787372453c44814a645cc522c3e6e6020 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 22:07:10 -0500 Subject: [PATCH 33/49] Add test to ensure catchup lockup does not occur. 
GH #6727
---
 tests/nodeos_startup_catchup.py | 97 +++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100755 tests/nodeos_startup_catchup.py

diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py
new file mode 100755
index 00000000000..da75a72b23b
--- /dev/null
+++ b/tests/nodeos_startup_catchup.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+
+from testUtils import Utils
+import testUtils
+import time
+from Cluster import Cluster
+from WalletMgr import WalletMgr
+from Node import Node
+from TestHelper import AppArgs
+from TestHelper import TestHelper
+
+import decimal
+import math
+import re
+
+###############################################################
+# nodeos_startup_catchup
+# Test configures a producing node and <--txn-gen-nodes count> non-producing nodes with the
+# txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them
+# to the producing node.
+# 1) After 10 seconds a new node is started.
+# 2) 10 seconds later, that node is checked to see if it has caught up to the producing node and
+#    that node is killed and a new node is started.
+# 3) Repeat step 2, <--catchup-count - 1> more times
+###############################################################
+
+Print=Utils.Print
+errorExit=Utils.errorExit
+
+from core_symbol import CORE_SYMBOL
+
+appArgs=AppArgs()
+extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10)
+extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=4)
+args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
+                              "-p","--p2p-plugin","--wallet-port"}, applicationSpecificArgs=appArgs)
+Utils.Debug=args.v
+pnodes=args.p if args.p > 0 else 1
+startedNonProdNodes = args.txn_gen_nodes if args.txn_gen_nodes >= 2 else 2
+cluster=Cluster(walletd=True)
+dumpErrorDetails=args.dump_error_details
+keepLogs=args.keep_logs
+dontKill=args.leave_running
+prodCount=args.prod_count if args.prod_count > 1 else 2
+killAll=args.clean_run
+p2pPlugin=args.p2p_plugin
+walletPort=args.wallet_port
+catchupCount=args.catchup_count
+totalNodes=startedNonProdNodes+pnodes+catchupCount
+
+walletMgr=WalletMgr(True, port=walletPort)
+testSuccessful=False
+killEosInstances=not dontKill
+killWallet=not dontKill
+
+WalletdName=Utils.EosWalletName
+ClientName="cleos"
+
+try:
+    TestHelper.printSystemInfo("BEGIN")
+    cluster.setWalletMgr(walletMgr)
+
+    cluster.killall(allInstances=killAll)
+    cluster.cleanup()
+    specificExtraNodeosArgs={}
+    txnGenNodeNum=pnodes  # next node after producer nodes
+    for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes):
+        specificExtraNodeosArgs[nodeNum]="--plugin eosio::txn_test_gen_plugin --txn-test-gen-account-prefix txntestacct"
+    Print("Stand up cluster")
+    if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, p2pPlugin=p2pPlugin,
+                      useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False:
+        Utils.cmdError("launcher")
+        Utils.errorExit("Failed to stand up eos cluster.")
+
+    Print("Validating system accounts after bootstrap")
+    cluster.validateAccounts(None)
+
+    txnGenNodes=[]
+    for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes):
+        txnGenNodes.append(cluster.getNode(nodeNum))
+
+    
txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) + time.sleep(20) + + for genNum in range(0, len(txnGenNodes)): + salt="%d" % genNum + txnGenNodes[genNum].txnGenStart(salt, 1000, 200) + + time.sleep(10) + + + testSuccessful=True + +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + +exit(0) From 97f777bbb073316680a2e5214ede61a1503c397c Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 22:57:01 -0500 Subject: [PATCH 34/49] Fixed launcher setup of unstarted nodes. GH #6727. --- programs/eosio-launcher/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 51a0808103b..35f12b94e75 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -892,7 +892,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; - const auto to_not_start_node = total_nodes - unstarted_nodes; + const auto to_not_start_node = total_nodes - unstarted_nodes - 1; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; From dd4d3a476cad1ffe8b13e09f154ffb5a5607f4de Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:01:01 -0500 Subject: [PATCH 35/49] Added python script handling for unstarted nodes. GH #6727. --- tests/Cluster.py | 31 +++++++++++++++++++++++++++++++ tests/Node.py | 27 ++++++++++++++------------- 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 8665deffb32..debfa1464cd 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -50,6 +50,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 """ self.accounts={} self.nodes={} + self.unstartedNodes=[] self.localCluster=localCluster self.wallet=None self.walletd=walletd @@ -379,6 +380,9 @@ def connectGroup(group, producerNodes, bridgeNodes) : self.nodes=nodes + if unstartedNodes > 0: + self.unstartedNodes=self.discoverUnstartedLocalNodes(unstartedNodes, totalNodes) + if onlyBios: biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort, walletMgr=self.walletMgr) if not biosNode.checkPulse(): @@ -645,6 +649,16 @@ def getNode(self, nodeId=0, exitOnError=True): def getNodes(self): return self.nodes + def launchUnstarted(self, numToLaunch=1, cachePopen=False): + assert(isinstance(numToLaunch, int)) + assert(numToLaunch>0) + launchList=self.unstartedNodes[:numToLaunch] + del self.unstartedNodes[:numToLaunch] + for node in launchList: + # the node number is indexed off of the started nodes list + node.launchUnstarted(len(self.nodes), cachePopen=cachePopen) + self.nodes.append(node) + # Spread funds across accounts with transactions spread through cluster nodes. 
# Validate transactions are synchronized on root node def spreadFunds(self, source, accounts, amount=1): @@ -1485,6 +1499,23 @@ def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000): return True + def discoverUnstartedLocalNodes(self, unstartedNodes, totalNodes): + unstarted=[] + firstUnstartedNode=totalNodes-unstartedNodes + for nodeId in range(firstUnstartedNode, totalNodes): + unstarted.append(self.discoverUnstartedLocalNode(nodeId)) + return unstarted + + def discoverUnstartedLocalNode(self, nodeId): + startFile=Node.unstartedFile(nodeId) + with open(startFile, 'r') as file: + cmd=file.read() + Utils.Print("unstarted local node cmd: %s" % (cmd)) + p=re.compile(r'^\s*(\w+)\s*=\s*([^\s](?:.*[^\s])?)\s*$') + instance=Node(self.host, port=self.port+nodeId, pid=None, cmd=cmd, walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Unstarted Node>", instance) + return instance + def getInfos(self, silentErrors=False, exitOnError=False): infos=[] for node in self.nodes: diff --git a/tests/Node.py b/tests/Node.py index 77c3157b5dc..7b3259ece53 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -62,7 +62,7 @@ def eosClientArgs(self): def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) - return "Host: %s, Port:%d" % (self.host, self.port) + return "Host: %s, Port:%d, Pid:%s" % (self.host, self.port, self.pid) @staticmethod def validateTransaction(trans): @@ -1095,6 +1095,8 @@ def processCurlCmd(self, resource, command, payload, silentErrors=True, exitOnEr if Utils.Debug: end=time.perf_counter() Utils.Print("cmd Duration: %.3f sec" % (end-start)) + printReturn=json.dumps(rtn) if returnType==ReturnType.json else rtn + Utils.Print("cmd returned: %s" % (printReturn)) except subprocess.CalledProcessError as ex: if not silentErrors: end=time.perf_counter() @@ -1241,12 +1243,12 @@ def myFunc(): self.killed=True return True - def interruptAndVerifyExitStatus(self): + def interruptAndVerifyExitStatus(self, timeout=15): if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd)) assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." 
% (self.cmd) self.popenProc.send_signal(signal.SIGINT) try: - outs, _ = self.popenProc.communicate(timeout=15) + outs, _ = self.popenProc.communicate(timeout=timeout) assert self.popenProc.returncode == 0, "Expected terminating \"%s\" to have an exit status of 0, but got %d" % (self.cmd, self.popenProc.returncode) except subprocess.TimeoutExpired: Utils.errorExit("Terminate call failed on node: %s" % (self.cmd)) @@ -1376,18 +1378,17 @@ def isNodeAlive(): self.killed=False return True - def launchUnstarted(self, nodeId, cachePopen=False): + @staticmethod + def unstartedFile(nodeId): + assert(isinstance(nodeId, int)) startFile=Utils.getNodeDataDir(nodeId, "start.cmd") if not os.path.exists(startFile): - Utils.Print("Cannot launch unstarted process since %s file does not exist" % startFile) - return False - - with open(startFile, 'r') as file: - cmd=file.read() - Utils.Print("launchUnstarted cmd: %s" % (cmd)) + Utils.errorExit("Cannot find unstarted node since %s file does not exist" % startFile) + return startFile - self.launchCmd(cmd, nodeId, cachePopen) - return True + def launchUnstarted(self, nodeId, cachePopen=False): + Utils.Print("launchUnstarted cmd: %s" % (self.cmd)) + self.launchCmd(self.cmd, nodeId, cachePopen) def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) @@ -1401,7 +1402,7 @@ def launchCmd(self, cmd, nodeId, cachePopen=False): if cachePopen: self.popenProc=popen self.pid=popen.pid - if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + if Utils.Debug: Utils.Print("start Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) def trackCmdTransaction(self, trans, ignoreNonTrans=False): if trans is None: From 03c2eaa624c45de9616d522add7be8fc9a13e3e0 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:11:05 -0500 Subject: [PATCH 36/49] Added starting up unstarted nodes and verifying catchup. GH #6727. 
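Each iteration launches one of the pre-configured unstarted nodes, first confirms that the producing node's LIB is still advancing (a sanity check), then requires the catchup node's LIB both to advance and to close the gap to node 0 before the node is interrupted. The stall checks are deliberately forgiving; condensed, the failure predicate is roughly

    stalled = retryCount >= retryCountMax or head(catchupNode) <= lastCatchupHeadNum

so a slow but still progressing sync is not misreported as a lockup.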
---
 tests/nodeos_startup_catchup.py | 78 ++++++++++++++++++++++++++++++---
 1 file changed, 73 insertions(+), 5 deletions(-)

diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py
index da75a72b23b..bc73392c702 100755
--- a/tests/nodeos_startup_catchup.py
+++ b/tests/nodeos_startup_catchup.py
@@ -5,6 +5,7 @@
 import time
 from Cluster import Cluster
 from WalletMgr import WalletMgr
+from Node import BlockType
 from Node import Node
 from TestHelper import AppArgs
 from TestHelper import TestHelper
@@ -31,7 +32,7 @@
 appArgs=AppArgs()
 extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10)
-extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=4)
+extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=2)
 args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
                               "-p","--p2p-plugin","--wallet-port"}, applicationSpecificArgs=appArgs)
 Utils.Debug=args.v
@@ -45,7 +46,7 @@
 killAll=args.clean_run
 p2pPlugin=args.p2p_plugin
 walletPort=args.wallet_port
-catchupCount=args.catchup_count
+catchupCount=args.catchup_count if args.catchup_count > 0 else 1
 totalNodes=startedNonProdNodes+pnodes+catchupCount
@@ -69,7 +70,6 @@
     Print("Stand up cluster")
     if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, p2pPlugin=p2pPlugin,
                       useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False:
-        Utils.cmdError("launcher")
         Utils.errorExit("Failed to stand up eos cluster.")
@@ -84,11 +84,79 @@
     for genNum in range(0, len(txnGenNodes)):
         salt="%d" % genNum
-        txnGenNodes[genNum].txnGenStart(salt, 1000, 200)
+        txnGenNodes[genNum].txnGenStart(salt, 1500, 150)
+        time.sleep(1)
+
+    node0=cluster.getNode(0)
+
+    def lib(node):
+        return node.getBlockNum(BlockType.lib)
+
+    def head(node):
+        return node.getBlockNum(BlockType.head)
 
     time.sleep(10)
+    retryCountMax=100
+    for catchup_num in range(0, catchupCount):
+        lastLibNum=lib(node0)
+        lastHeadNum=head(node0)
+        lastCatchupLibNum=None
+
+        cluster.launchUnstarted(cachePopen=True)
+        retryCount=0
+        # verify that production node is advancing (sanity check)
+        while lib(node0)<=lastLibNum:
+            time.sleep(4)
+            retryCount+=1
+            # give it some more time if the head is still moving forward
+            if retryCount>=20 or head(node0)<=lastHeadNum:
+                Utils.errorExit("Node 0 failing to advance lib. Was %s, now %s." % (lastLibNum, lib(node0)))
+            if Utils.Debug: Utils.Print("Node 0 head was %s, now %s. Waiting for lib to advance" % (lastLibNum, lib(node0)))
+            lastHeadNum=head(node0)
+
+        catchupNode=cluster.getNodes()[-1]
+        time.sleep(9)
+        lastCatchupLibNum=lib(catchupNode)
+        lastCatchupHeadNum=head(catchupNode)
+        retryCount=0
+        while lib(catchupNode)<=lastCatchupLibNum:
+            time.sleep(5)
+            retryCount+=1
+            # give it some more time if the head is still moving forward
+            if retryCount>=100 or head(catchupNode)<=lastCatchupHeadNum:
+                Utils.errorExit("Catchup Node %s failing to advance lib. Was %s, now %s." %
+                                (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode)))
+            if Utils.Debug: Utils.Print("Catchup Node %s head was %s, now %s. 
Waiting for lib to advance" % (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) + lastCatchupHeadNum=head(catchupNode) + + retryCount=0 + lastLibNum=lib(node0) + trailingLibNum=lastLibNum-lib(catchupNode) + lastHeadNum=head(node0) + libNotMovingCount=0 + while trailingLibNum>0: + delay=5 + time.sleep(delay) + libMoving=lib(catchupNode)>lastCatchupLibNum + if libMoving: + trailingLibNum=lastLibNum-lib(catchupNode) + libNotMovingCount=0 + else: + libNotMovingCount+=1 + if Utils.Debug and libNotMovingCount%10==0: + Utils.Print("Catchup node %s lib has not moved for %s seconds, lib is %s" % + (cluster.getNodes().index(catchupNode), (delay*libNotMovingCount), lib(catchupNode))) + retryCount+=1 + # give it some more time if the head is still moving forward + if retryCount>=retryCountMax or head(catchupNode)<=lastCatchupHeadNum or libNotMovingCount>100: + Utils.errorExit("Catchup Node %s failing to advance lib along with node 0. Catchup node lib is %s, node 0 lib is %s." % + (cluster.getNodes().index(catchupNode), lib(catchupNode), lastLibNum)) + if Utils.Debug: Utils.Print("Catchup Node %s head is %s, node 0 head is %s. Waiting for lib to advance from %s to %s" % (cluster.getNodes().index(catchupNode), head(catchupNode), head(node0), lib(catchupNode), lastLibNum)) + lastCatchupHeadNum=head(catchupNode) + + catchupNode.interruptAndVerifyExitStatus(60) + retryCountMax*=3 - testSuccessful=True finally: From fef0d2acb7e19193678d28692a9943cc95267f15 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:15:08 -0500 Subject: [PATCH 37/49] Changed api to return a json status to indicate what happened. GH #6727. --- .../txn_test_gen_plugin.cpp | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 780127efc15..670114ea85c 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -28,9 +28,13 @@ using namespace eosio::testing; namespace eosio { namespace detail { struct txn_test_gen_empty {}; + struct txn_test_gen_status { + string status; + }; }} FC_REFLECT(eosio::detail::txn_test_gen_empty, ); +FC_REFLECT(eosio::detail::txn_test_gen_status, (status)); namespace eosio { @@ -53,8 +57,8 @@ using io_work_t = boost::asio::executor_work_guard(); \ - api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ - eosio::detail::txn_test_gen_empty result; + auto status = api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ + eosio::detail::txn_test_gen_status result = { status }; #define INVOKE_V_R_R(api_handle, call_name, in_param0, in_param1) \ const auto& vs = fc::json::json::from_string(body).as(); \ @@ -179,7 +183,7 @@ struct txn_test_gen_plugin_impl { trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.sign(creator_priv_key, chainid); trxs.emplace_back(std::move(trx)); @@ -249,7 +253,7 @@ struct txn_test_gen_plugin_impl { trx.actions.push_back(act); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.max_net_usage_words = 5000; trx.sign(txn_test_receiver_C_priv_key, chainid); @@ -263,15 
+267,17 @@ struct txn_test_gen_plugin_impl { push_transactions(std::move(trxs), next); } - void start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + string start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + ilog("Starting transaction test plugin"); if(running) - throw fc::exception(fc::invalid_operation_exception_code); + return "start_generation already running"; if(period < 1 || period > 2500) - throw fc::exception(fc::invalid_operation_exception_code); + return "period must be between 1 and 2500"; if(batch_size < 1 || batch_size > 250) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be between 1 and 250"; if(batch_size & 1) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be even"; + ilog("Starting transaction test plugin valid"); running = true; @@ -312,6 +318,7 @@ struct txn_test_gen_plugin_impl { boost::asio::post( *gen_ioc, [this]() { arm_timer(boost::asio::high_resolution_timer::clock_type::now()); }); + return "success"; } void arm_timer(boost::asio::high_resolution_timer::time_point s) { From 640257efc443e55a17984807cda4710fb34ce96e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 23 Mar 2019 00:03:42 -0500 Subject: [PATCH 38/49] Added nodeos_startup_catchup to long running tests. GH #6727. --- tests/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index ae9b36bcd68..0eea67cbce3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -35,6 +35,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CM configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_startup_catchup.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_startup_catchup.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) @@ -106,6 +107,8 @@ set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) if(ENABLE_COVERAGE_TESTING) From fcd01c82b01574f474ceaac068d84882a9cff4bb Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 23 Mar 2019 15:55:56 -0500 Subject: [PATCH 39/49] Fixed interruptAndVerifyExitStatus to track that it was killed. GH #6727. 
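relaunch() begins with assert(self.pid is None) and assert(self.killed), so a node stopped through interruptAndVerifyExitStatus() could not previously be restarted without tripping those assertions. The intended kill/restart sequence is therefore (a sketch; the variable names are illustrative):

    catchupNode.interruptAndVerifyExitStatus(60)           # SIGINT, verify exit status 0, mark killed
    catchupNode.relaunch(catchupNodeNum, cachePopen=True)  # now passes the pid/killed asserts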
From 640257efc443e55a17984807cda4710fb34ce96e Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Sat, 23 Mar 2019 00:03:42 -0500
Subject: [PATCH 38/49] Added nodeos_startup_catchup to long running tests. GH #6727.

---
 tests/CMakeLists.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index ae9b36bcd68..0eea67cbce3 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -35,6 +35,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CM
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_startup_catchup.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_startup_catchup.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY)
@@ -106,6 +107,8 @@ set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests)
 add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests)
+add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests)

 if(ENABLE_COVERAGE_TESTING)

From fcd01c82b01574f474ceaac068d84882a9cff4bb Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Sat, 23 Mar 2019 15:55:56 -0500
Subject: [PATCH 39/49] Fixed interruptAndVerifyExitStatus to track that it was killed. GH #6727.

---
 tests/Node.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tests/Node.py b/tests/Node.py
index 7b3259ece53..3e31c396d5f 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -1253,6 +1253,10 @@ def interruptAndVerifyExitStatus(self, timeout=15):
         except subprocess.TimeoutExpired:
             Utils.errorExit("Terminate call failed on node: %s" % (self.cmd))

+        # mark node as killed
+        self.pid=None
+        self.killed=True
+
     def verifyAlive(self, silent=False):
         if not silent and Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd))
         if self.killed or self.pid is None:
@@ -1318,7 +1322,7 @@ def getNextCleanProductionCycle(self, trans):

     # TBD: make nodeId an internal property
     # pylint: disable=too-many-locals
-    def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False):
+    def relaunch(self, nodeId, chainArg=None, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False):
         assert(self.pid is None)
         assert(self.killed)
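The two `Node.py` changes above exist so a test can stop a node and then bring the same node back: `interruptAndVerifyExitStatus` now records the kill (`pid=None`, `killed=True`), which satisfies the asserts at the top of `relaunch`, and `chainArg` gains a default so `relaunch` can be called with just the node id. A minimal sketch of the resulting stop/restart cycle (names follow the test framework in `tests/`; this is illustrative, not a complete test):

```python
# Sketch: stop and restart the most recently launched node in the cluster.
catchupNode = cluster.getNodes()[-1]
catchupNodeNum = cluster.getNodes().index(catchupNode)

catchupNode.interruptAndVerifyExitStatus(60)  # interrupt nodeos and verify a clean exit;
                                              # now also sets pid=None and killed=True
assert catchupNode.killed                     # the precondition relaunch() asserts

catchupNode.relaunch(catchupNodeNum)          # chainArg may now be omitted
```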
From 2790d66b81dd021b335249dcb360ddf3eea5d2ba Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Sat, 23 Mar 2019 16:00:06 -0500
Subject: [PATCH 40/49] Added catchup after relaunching the catchup node and refactored test using framework methods. GH #6727.

---
 tests/nodeos_startup_catchup.py | 104 ++++++++++++--------------------
 1 file changed, 39 insertions(+), 65 deletions(-)

diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py
index bc73392c702..c7f1fa80ae4 100755
--- a/tests/nodeos_startup_catchup.py
+++ b/tests/nodeos_startup_catchup.py
@@ -20,9 +20,11 @@
 # txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them
 # to the producing node.
 # 1) After 10 seconds a new node is started.
-# 2) 10 seconds later, that node is checked to see if it has caught up to the producing node and
-#    that node is killed and a new node is started.
-# 3) Repeat step 2, <--catchup-count - 1> more times
+# 2) the node is allowed to catch up to the producing node
+# 3) that node is killed
+# 4) restart the node
+# 5) the node is allowed to catch up to the producing node
+# 6) Repeat steps 2-5, <--catchup-count - 1> more times
 ###############################################################

 Print=Utils.Print
@@ -80,14 +82,6 @@ txnGenNodes.append(cluster.getNode(nodeNum))

     txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey)
-    time.sleep(20)
-
-    for genNum in range(0, len(txnGenNodes)):
-        salt="%d" % genNum
-        txnGenNodes[genNum].txnGenStart(salt, 1500, 150)
-        time.sleep(1)
-
-    node0=cluster.getNode(0)

     def lib(node):
         return node.getBlockNum(BlockType.lib)
@@ -95,67 +89,47 @@ def lib(node):
     def head(node):
         return node.getBlockNum(BlockType.head)

-    time.sleep(10)
-    retryCountMax=100
-    for catchup_num in range(0, catchupCount):
-        lastLibNum=lib(node0)
-        lastHeadNum=head(node0)
-        lastCatchupLibNum=None
+    node0=cluster.getNode(0)
+    blockNum=head(node0)
+    node0.waitForBlock(blockNum, blockType=BlockType.lib)
+
+    for genNum in range(0, len(txnGenNodes)):
+        salt="%d" % genNum
+        txnGenNodes[genNum].txnGenStart(salt, 1500, 150)
+        time.sleep(1)
+
+    blockNum=head(node0)
+    node0.waitForBlock(blockNum+20)
+
+    twoRounds=21*2*12
+    for catchup_num in range(0, catchupCount):
         cluster.launchUnstarted(cachePopen=True)
-        retryCount=0
-        # verify that production node is advancing (sanity check)
-        while lib(node0)<=lastLibNum:
-            time.sleep(4)
-            retryCount+=1
-            # give it some more time if the head is still moving forward
-            if retryCount>=20 or head(node0)<=lastHeadNum:
-                Utils.errorExit("Node 0 failing to advance lib. Was %s, now %s." % (lastLibNum, lib(node0)))
-            if Utils.Debug: Utils.Print("Node 0 head was %s, now %s. Waiting for lib to advance" % (lastLibNum, lib(node0)))
-            lastHeadNum=head(node0)
+        lastLibNum=lib(node0)
+        # verify producer lib is still advancing
+        node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib)

         catchupNode=cluster.getNodes()[-1]
-        time.sleep(9)
+        catchupNodeNum=cluster.getNodes().index(catchupNode)
         lastCatchupLibNum=lib(catchupNode)
-        lastCatchupHeadNum=head(catchupNode)
-        retryCount=0
-        while lib(catchupNode)<=lastCatchupLibNum:
-            time.sleep(5)
-            retryCount+=1
-            # give it some more time if the head is still moving forward
-            if retryCount>=100 or head(catchupNode)<=lastCatchupHeadNum:
-                Utils.errorExit("Catchup Node %s failing to advance lib. Was %s, now %s." %
-                                (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode)))
-            if Utils.Debug: Utils.Print("Catchup Node %s head was %s, now %s. Waiting for lib to advance" % (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode)))
-            lastCatchupHeadNum=head(catchupNode)
-
-        retryCount=0
-        lastLibNum=lib(node0)
-        trailingLibNum=lastLibNum-lib(catchupNode)
-        lastHeadNum=head(node0)
-        libNotMovingCount=0
-        while trailingLibNum>0:
-            delay=5
-            time.sleep(delay)
-            libMoving=lib(catchupNode)>lastCatchupLibNum
-            if libMoving:
-                trailingLibNum=lastLibNum-lib(catchupNode)
-                libNotMovingCount=0
-            else:
-                libNotMovingCount+=1
-                if Utils.Debug and libNotMovingCount%10==0:
-                    Utils.Print("Catchup node %s lib has not moved for %s seconds, lib is %s" %
-                                (cluster.getNodes().index(catchupNode), (delay*libNotMovingCount), lib(catchupNode)))
-            retryCount+=1
-            # give it some more time if the head is still moving forward
-            if retryCount>=retryCountMax or head(catchupNode)<=lastCatchupHeadNum or libNotMovingCount>100:
-                Utils.errorExit("Catchup Node %s failing to advance lib along with node 0. Catchup node lib is %s, node 0 lib is %s." %
-                                (cluster.getNodes().index(catchupNode), lib(catchupNode), lastLibNum))
-            if Utils.Debug: Utils.Print("Catchup Node %s head is %s, node 0 head is %s. Waiting for lib to advance from %s to %s" % (cluster.getNodes().index(catchupNode), head(catchupNode), head(node0), lib(catchupNode), lastLibNum))
-            lastCatchupHeadNum=head(catchupNode)
+        # verify lib is advancing (before we wait for it to have to catchup with producer)
+        catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib)
+
+        numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds
+        catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib)

         catchupNode.interruptAndVerifyExitStatus(60)
-        retryCountMax*=3
+
+        catchupNode.relaunch(catchupNodeNum)
+        lastCatchupLibNum=lib(catchupNode)
+        # verify catchup node is advancing to producer
+        catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib)
+
+        lastLibNum=lib(node0)
+        # verify producer lib is still advancing
+        node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib)
+        # verify catchup node is advancing to producer
+        catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib)

     testSuccessful=True
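The refactored test replaces hand-tuned `time.sleep`/retry loops with `waitForBlock` deadlines derived from `twoRounds`. The arithmetic is worth spelling out; the producer count, round length, and block interval below are the standard EOSIO defaults, which the patch assumes rather than states:

```python
# 21 producers x 12 consecutive blocks each, over two full schedule rounds:
twoRounds = 21 * 2 * 12          # 504 blocks
# nodeos produces a block every 0.5 s, i.e. ~2 blocks per second, so
# dividing a block count by 2 converts it into a timeout in seconds:
timeout = twoRounds / 2          # 252 seconds
# Catching up must replay the lib gap plus whatever is produced meanwhile:
numBlocksToCatchup = (lastLibNum - lastCatchupLibNum - 1) + twoRounds
```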
From 722ac062fc3cc1469328bfc273c6b0a2964790a6 Mon Sep 17 00:00:00 2001
From: UMU
Date: Mon, 25 Mar 2019 15:31:23 +0800
Subject: [PATCH 41/49] Improve for MongoDB sharding

---
 plugins/mongo_db_plugin/mongo_db_plugin.cpp | 29 ++++++++++++---------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp
index 8131b6a2bb2..2ba100bdc84 100644
--- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp
+++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp
@@ -1462,39 +1462,44 @@ void mongo_db_plugin_impl::init() {
    }

    try {
+      // Due to the vast amounts of data, we suggest MongoDB administrators:
+      // 1. enableSharding database (default to EOS)
+      // 2. shardCollection: blocks, action_traces, transaction_traces, especially action_traces
+      // 3. Use compound index with shard key (default to _id), to improve query performance.
+
       // blocks indexes
       auto blocks = mongo_conn[db_name][blocks_col];
-      blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" ));
-      blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" ));
+      blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" ));
+      blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" ));

       auto block_states = mongo_conn[db_name][block_states_col];
-      block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" ));
-      block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" ));
+      block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" ));
+      block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" ));

       // accounts indexes
-      accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" ));
+      accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1, "_id" : 1 })xxx" ));

       // transactions indexes
       auto trans = mongo_conn[db_name][trans_col];
-      trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" ));
+      trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1, "_id" : 1 })xxx" ));

       auto trans_trace = mongo_conn[db_name][trans_traces_col];
-      trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" ));
+      trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1, "_id" : 1 })xxx" ));

       // action traces indexes
       auto action_traces = mongo_conn[db_name][action_traces_col];
-      action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" ));
+      action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" ));

       // pub_keys indexes
       auto pub_keys = mongo_conn[db_name][pub_keys_col];
-      pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" ));
-      pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" ));
+      pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1, "_id" : 1 })xxx" ));
+      pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1, "_id" : 1 })xxx" ));

       // account_controls indexes
       auto account_controls = mongo_conn[db_name][account_controls_col];
       account_controls.create_index(
-            bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" ));
-      account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" ));
+            bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1, "_id" : 1 })xxx" ));
+      account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1, "_id" : 1 })xxx" ));

    } catch (...) {
       handle_mongo_exception( "create indexes", __LINE__ );
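For the sharded deployment the new comment recommends, the corresponding mongo shell commands would presumably run along these lines. The database name `EOS` is the plugin's default, and `_id` as the shard key matches the compound indexes created above; treat this as an illustrative sketch for administrators, not part of the patch:

```sh
# Run against a mongos router of the target cluster.
mongo --eval 'sh.enableSharding("EOS")'
# Shard the high-volume collections on _id, per the comment's suggestion:
mongo --eval 'sh.shardCollection("EOS.action_traces", { "_id" : 1 })'
mongo --eval 'sh.shardCollection("EOS.transaction_traces", { "_id" : 1 })'
mongo --eval 'sh.shardCollection("EOS.blocks", { "_id" : 1 })'
```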
From bb0646b62d2fbdc17db38c986b364250e880ff52 Mon Sep 17 00:00:00 2001
From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com>
Date: Tue, 26 Mar 2019 14:06:52 -0400
Subject: [PATCH 42/49] Create CONTRIBUTING.md

---
 CONTRIBUTING.md | 148 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 148 insertions(+)
 create mode 100644 CONTRIBUTING.md

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000000..40ecbf9cea8
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,148 @@
+# Contributing to eos
+
+Interested in contributing? That's awesome! Here are some guidelines to get started quickly and easily:
+
+- [Reporting An Issue](#reporting-an-issue)
+  - [Bug Reports](#bug-reports)
+  - [Feature Requests](#feature-requests)
+  - [Change Requests](#change-requests)
+- [Working on eos](#working-on-eos)
+  - [Feature Branches](#feature-branches)
+  - [Submitting Pull Requests](#submitting-pull-requests)
+  - [Testing and Quality Assurance](#testing-and-quality-assurance)
+- [Conduct](#conduct)
+- [Contributor License & Acknowledgments](#contributor-license--acknowledgments)
+- [References](#references)
+
+## Reporting An Issue
+
+If you're about to raise an issue because you think you've found a problem with eos, or you'd like to make a request for a new feature in the codebase, or any other reason… please read this first.
+
+The GitHub issue tracker is the preferred channel for [bug reports](#bug-reports), [feature requests](#feature-requests), and [submitting pull requests](#submitting-pull-requests), but please respect the following restrictions:
+
+* Please **search for existing issues**. Help us keep duplicate issues to a minimum by checking to see if someone has already reported your problem or requested your idea.
+
+* Please **be civil**. Keep the discussion on topic and respect the opinions of others. See also our [Contributor Code of Conduct](#conduct).
+
+### Bug Reports
+
+A bug is a _demonstrable problem_ that is caused by the code in the repository. Good bug reports are extremely helpful - thank you!
+
+Guidelines for bug reports:
+
+1. **Use the GitHub issue search** — check if the issue has already been
+   reported.
+
+1. **Check if the issue has been fixed** — look for [closed issues in the
+   current milestone](https://github.com/EOSIO/eos/issues?q=is%3Aissue+is%3Aclosed) or try to reproduce it
+   using the latest `develop` branch.
+
+A good bug report shouldn't leave others needing to chase you up for more information. Be sure to include the details of your environment and relevant tests that demonstrate the failure.
+
+[Report a bug](https://github.com/EOSIO/eos/issues/new?title=Bug%3A)
+
+### Feature Requests
+
+Feature requests are welcome. Before you submit one, be sure to:
+
+1. **Use the GitHub search** and check the feature hasn't already been requested.
+1. Take a moment to think about whether your idea fits with the scope and aims of the project.
+1. Remember, it's up to *you* to make a strong case to convince the project's leaders of the merits of this feature. Please provide as much detail and context as possible; this means explaining the use case and why it is likely to be common.
+
+### Change Requests
+
+Change requests cover both architectural and functional changes to how eos works. If you have an idea for a new or different dependency, a refactor, or an improvement to a feature, etc. - please be sure to:
+
+1. **Use the GitHub search** and check someone else didn't get there first
+1. Take a moment to think about the best way to make a case for, and explain what you're thinking. Are you sure this shouldn't really be
+   a [bug report](#bug-reports) or a [feature request](#feature-requests)? Is it really one idea or is it many? What's the context? What problem are you solving? Why is what you are suggesting better than what's already there?
+
+## Working on eos
+
+Code contributions are welcome and encouraged! If you are looking for a good place to start, check out the [good first issue](https://github.com/EOSIO/eos/labels/good%20first%20issue) label in GitHub issues.
+
+Also, please follow these guidelines when submitting code:
+
+### Feature Branches
+
+To get it out of the way:
+
+- **[develop](https://github.com/EOSIO/eos/tree/develop)** is the development branch. All work on the next release happens here so you should generally branch off `develop`. Do **NOT** use this branch in production.
+- **[master](https://github.com/EOSIO/eos/tree/master)** contains the latest release of eos. This branch may be used in production. Do **NOT** use this branch to work on eos's source.
+
+### Submitting Pull Requests
+
+Pull requests are awesome. If you're looking to raise a PR for something which doesn't have an open issue, please think carefully about [raising an issue](#reporting-an-issue) which your PR can close, especially if you're fixing a bug. This makes it more likely that there will be enough information available for your PR to be properly tested and merged.
+
+### Testing and Quality Assurance
+
+Never underestimate just how useful quality assurance is. If you're looking to get involved with the code base and don't know where to start, checking out and testing a pull request is one of the most useful things you could do.
+
+Essentially, [check out the latest develop branch](#working-on-eos), take it for a spin, and if you find anything odd, please follow the [bug report guidelines](#bug-reports) and let us know!
+
+## Conduct
+
+While contributing, please be respectful and constructive, so that participation in our project is a positive experience for everyone.
+
+Examples of behavior that contributes to creating a positive environment include:
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behavior include:
+- The use of sexualized language or imagery and unwelcome sexual attention or advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others’ private information, such as a physical or electronic address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Contributor License & Acknowledgments
+
+Whenever you make a contribution to this project, you license your contribution under the same terms as set out in LICENSE, and you represent and warrant that you have the right to license your contribution under those terms. Whenever you make a contribution to this project, you also certify in the terms of the Developer’s Certificate of Origin set out below:
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+## References
+
+* Overall CONTRIB adapted from https://github.com/mathjax/MathJax/blob/master/CONTRIBUTING.md
+* Conduct section adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

From 7faca6c888a2e90c8cf0bf74bec41852a342d3f1 Mon Sep 17 00:00:00 2001
From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com>
Date: Tue, 26 Mar 2019 14:07:20 -0400
Subject: [PATCH 43/49] Update LICENSE

---
 LICENSE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LICENSE b/LICENSE
index 1516b96cbdf..31dee1d933c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2019 Respective Authors all rights reserved.
+Copyright (c) 2017-2019 block.one all rights reserved.

 The MIT License

From cee6dea42fce72c3d3f313a4a7438acfe8d9dc43 Mon Sep 17 00:00:00 2001
From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com>
Date: Tue, 26 Mar 2019 14:10:05 -0400
Subject: [PATCH 44/49] Update README.md

---
 README.md | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/README.md b/README.md
index e22a2b2cebc..7bce246fbc3 100644
--- a/README.md
+++ b/README.md
@@ -105,3 +105,17 @@ EOSIO currently supports the following operating systems:
 ## Getting Started

 Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in [Getting Started](https://developers.eos.io/eosio-home/docs) on the [EOSIO Developer Portal](https://developers.eos.io).
+
+## Contributing
+
+[Contributing Guide](./CONTRIBUTING.md)
+
+[Code of Conduct](./CONTRIBUTING.md#conduct)
+
+## License
+
+[MIT](./LICENSE)
+
+## Important
+
+See LICENSE for copyright and license terms. Block.one makes its contribution on a voluntary basis as a member of the EOSIO community and is not responsible for ensuring the overall performance of the software or any related applications. We make no representation, warranty, guarantee or undertaking in respect of the software or any related documentation, whether expressed or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose and noninfringement. In no event shall we be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the software or documentation or the use or other dealings in the software or documentation. Any test results or performance figures are indicative and will not reflect performance under all conditions. Any reference to any third party or third-party product, service or other resource is not an endorsement or recommendation by Block.one. We are not responsible, and disclaim any and all responsibility and liability, for your use of or reliance on any of these resources. Third-party resources may be updated, changed or terminated at any time, so the information here may be out of date or inaccurate.

From ccd6e53dc44b76c5351424d1896955574e230248 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Wed, 27 Mar 2019 12:19:03 -0500
Subject: [PATCH 45/49] Attempt to make comment clearer

---
 plugins/mongo_db_plugin/mongo_db_plugin.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp
index 2ba100bdc84..0adb1670068 100644
--- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp
+++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp
@@ -1462,10 +1462,10 @@ void mongo_db_plugin_impl::init() {
    }

    try {
-      // Due to the vast amounts of data, we suggest MongoDB administrators:
+      // MongoDB administrators (to enable sharding):
       // 1. enableSharding database (default to EOS)
       // 2. shardCollection: blocks, action_traces, transaction_traces, especially action_traces
-      // 3. Use compound index with shard key (default to _id), to improve query performance.
+      // 3. Compound index with shard key (default to _id below), to improve query performance.

       // blocks indexes
       auto blocks = mongo_conn[db_name][blocks_col];

From e019da51863d3d8dc53f46918f3e5fa9abaf10ae Mon Sep 17 00:00:00 2001
From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com>
Date: Thu, 28 Mar 2019 13:28:47 -0400
Subject: [PATCH 46/49] Update LICENSE

---
 LICENSE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LICENSE b/LICENSE
index 31dee1d933c..22d36d65db1 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2017-2019 block.one all rights reserved.
+Copyright (c) 2017-2019 block.one and its contributors. All rights reserved.
 The MIT License

From 00e96e1711c2c397dff5ccb70dca542a450d431f Mon Sep 17 00:00:00 2001
From: Nathan Pierce
Date: Thu, 28 Mar 2019 17:15:50 -0400
Subject: [PATCH 47/49] Python 36 for centos7 and amazonlinux1 (#7005)

---
 .buildkite/pipeline.yml       | 44 +++++++++++++++++------------------
 scripts/eosio_build_amazon.sh |  6 ++---
 scripts/eosio_build_centos.sh | 12 +++++-----
 3 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 57ce31e5a6c..f83249df044 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -17,7 +17,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2"
         workdir: /data/job
     timeout: 60

@@ -38,7 +38,7 @@ steps:
         region: "us-west-2"
      docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2"
         workdir: /data/job
     timeout: 60

@@ -59,7 +59,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2"
         workdir: /data/job
     timeout: 60

@@ -80,7 +80,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2"
         workdir: /data/job
     timeout: 60

@@ -101,7 +101,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2"
         workdir: /data/job
     timeout: 60

@@ -122,7 +122,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2"
         workdir: /data/job
     timeout: 60

@@ -173,7 +173,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2"
         workdir: /data/job
     timeout: 60

@@ -193,7 +193,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2"
         workdir: /data/job
     timeout: 60

@@ -214,7 +214,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2"
         workdir: /data/job
     timeout: 60

@@ -234,7 +234,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2"
         workdir: /data/job
     timeout: 60

@@ -255,7 +255,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2"
         workdir: /data/job
     timeout: 60

@@ -275,7 +275,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2"
         workdir: /data/job
     timeout: 60

@@ -296,7 +296,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2"
         workdir: /data/job
     timeout: 60

@@ -316,7 +316,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2"
         workdir: /data/job
     timeout: 60

@@ -337,7 +337,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2"
         workdir: /data/job
     timeout: 60

@@ -357,7 +357,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2"
         workdir: /data/job
     timeout: 60

@@ -378,7 +378,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2"
         workdir: /data/job
     timeout: 60

@@ -398,7 +398,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2"
         workdir: /data/job
     timeout: 60

@@ -501,7 +501,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2"
         workdir: /data/job
     env:
       OS: "ubuntu-16.04"

@@ -527,7 +527,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2"
         workdir: /data/job
     env:
       OS: "ubuntu-18.04"

@@ -560,7 +560,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2"
         workdir: /data/job
     env:
       OS: "fc27"

@@ -593,7 +593,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2"
         workdir: /data/job
     env:
       OS: "el7"
diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh
index 7a16e4486e9..ff655496a7b 100755
--- a/scripts/eosio_build_amazon.sh
+++ b/scripts/eosio_build_amazon.sh
@@ -8,13 +8,13 @@ DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' )
 DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 ))
 DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 ))

-if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then
+if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1
 	DEP_ARRAY=(
 		sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \
-		bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python34 python34-devel \
+		bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python36 python36-devel \
 		libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel
 	)
-else
+else # Amazonlinux2
 	DEP_ARRAY=(
 		git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \
 		bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \
diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh
index 1c1e97b2fab..8e7044001ab 100755
--- a/scripts/eosio_build_centos.sh
+++ b/scripts/eosio_build_centos.sh
@@ -121,7 +121,7 @@ printf "\\n"
 DEP_ARRAY=(
 	git autoconf automake libtool make bzip2 doxygen graphviz \
 	bzip2-devel openssl-devel gmp-devel \
-	ocaml libicu-devel python python-devel python33 \
+	ocaml libicu-devel python python-devel rh-python36 \
 	gettext-devel file sudo libusbx-devel libcurl-devel
 )
 COUNT=1
@@ -160,10 +160,10 @@ else
 	printf " - No required YUM dependencies to install.\\n\\n"
 fi

-if [ -d /opt/rh/python33 ]; then
-	printf "Enabling python33...\\n"
-	source /opt/rh/python33/enable || exit 1
-	printf " - Python33 successfully enabled!\\n"
+if [ -d /opt/rh/rh-python36 ]; then
+	printf "Enabling python36...\\n"
+	source /opt/rh/rh-python36/enable || exit 1
+	printf " - Python36 successfully enabled!\\n"
 fi

 printf "\\n"
@@ -190,7 +190,7 @@ if [ $? -ne 0 ]; then exit -1; fi
 printf "\\n"

-export CPATH="$CPATH:/opt/rh/python33/root/usr/include/python3.3m" # m on the end causes problems with boost finding python3
+export CPATH="$CPATH:/opt/rh/rh-python36/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3
 printf "Checking Boost library (${BOOST_VERSION}) installation...\\n"
 BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\  -f3 )
 if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then

From c33196dcdeb83e82ff14338bad7a52651b2c9544 Mon Sep 17 00:00:00 2001
From: Nathan Pierce
Date: Thu, 28 Mar 2019 20:25:48 -0400
Subject: [PATCH 48/49] long-running image version bump (#7011)

---
 .buildkite/long_running_tests.yml | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml
index 6383f57c392..dd0d6cbee9d 100644
--- a/.buildkite/long_running_tests.yml
+++ b/.buildkite/long_running_tests.yml
@@ -17,7 +17,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2"
         workdir: /data/job
     timeout: 60

@@ -38,7 +38,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2"
         workdir: /data/job
     timeout: 60

@@ -59,7 +59,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2"
         workdir: /data/job
     timeout: 60

@@ -80,7 +80,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2"
         workdir: /data/job
     timeout: 60

@@ -101,7 +101,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2"
         workdir: /data/job
     timeout: 60

@@ -122,7 +122,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2"
         workdir: /data/job
     timeout: 60

@@ -172,7 +172,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2"
         workdir: /data/job
     timeout: 90

@@ -192,7 +192,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2"
         workdir: /data/job
     timeout: 90

@@ -212,7 +212,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2"
         workdir: /data/job
     timeout: 90

@@ -232,7 +232,7 @@ steps:
         region: "us-west-2"
       docker#v2.1.0:
         debug: true
-        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1"
+        image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2"
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 90 @@ -252,7 +252,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 90 @@ -272,7 +272,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 90 From f6c9d81858fb5bd3e101c0ad72476c30f348c8cd Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Fri, 29 Mar 2019 16:57:36 -0400 Subject: [PATCH 49/49] New disk space requirements (#7023) --- scripts/eosio_build.sh | 4 ++-- scripts/eosio_build_centos.sh | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index d3128903097..a97ceaa5058 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -30,9 +30,8 @@ # https://github.com/EOSIO/eos/blob/master/LICENSE ########################################################################## -VERSION=2.1 # Build script version +VERSION=2.2 # Build script version CMAKE_BUILD_TYPE=Release -export DISK_MIN=20 DOXYGEN=false ENABLE_COVERAGE_TESTING=false CORE_SYMBOL_NAME="SYS" @@ -75,6 +74,7 @@ export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm export DOXYGEN_VERSION=1_8_14 export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} export TINI_VERSION=0.18.0 +export DISK_MIN=5 # Setup directories mkdir -p $SRC_LOCATION diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 8e7044001ab..621001d0a97 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -160,9 +160,10 @@ else printf " - No required YUM dependencies to install.\\n\\n" fi -if [ -d /opt/rh/rh-python36 ]; then +export PYTHON3PATH="/opt/rh/rh-python36" +if [ -d $PYTHON3PATH ]; then printf "Enabling python36...\\n" - source /opt/rh/rh-python36/enable || exit 1 + source $PYTHON3PATH/enable || exit 1 printf " - Python36 successfully enabled!\\n" fi @@ -190,7 +191,7 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -export CPATH="$CPATH:/opt/rh/rh-python36/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 +export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then @@ -299,7 +300,7 @@ cd .. printf "\\n" function print_instructions() { - printf "source /opt/rh/python33/enable\\n" + printf "source ${PYTHON3PATH}/enable\\n" printf "source /opt/rh/devtoolset-7/enable\\n" return 0 }