diff --git a/Dockerfile b/Dockerfile
index 8a970e39e..c20b4beaf 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -50,6 +50,7 @@ WORKDIR /peerplays-core
 # Compile Peerplays
 RUN \
     BOOST_ROOT=$HOME/boost_1_67_0 && \
+    git submodule sync --recursive && \
     git submodule update --init --recursive && \
     mkdir build && \
     mkdir build/release && \
diff --git a/libraries/app/CMakeLists.txt b/libraries/app/CMakeLists.txt
index 93e540f98..d66b4052b 100644
--- a/libraries/app/CMakeLists.txt
+++ b/libraries/app/CMakeLists.txt
@@ -5,7 +5,6 @@ add_library( graphene_app
    api.cpp
    application.cpp
    database_api.cpp
-   impacted.cpp
    plugin.cpp
    config_util.cpp
    ${HEADERS}
@@ -14,7 +13,7 @@ add_library( graphene_app
 # need to link graphene_debug_witness because plugins aren't sufficiently isolated #246
 #target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_chain fc graphene_db graphene_net graphene_utilities graphene_debug_witness )
-target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_accounts_list graphene_affiliate_stats graphene_chain fc graphene_db graphene_net graphene_time graphene_utilities graphene_debug_witness graphene_bookie )
+target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_accounts_list graphene_affiliate_stats graphene_chain fc graphene_db graphene_net graphene_time graphene_utilities graphene_debug_witness graphene_bookie graphene_elasticsearch )
 target_include_directories( graphene_app
    PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../egenesis/include" )
diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp
index 1cde21cdd..aaf508092 100644
--- a/libraries/app/api.cpp
+++ b/libraries/app/api.cpp
@@ -26,7 +26,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -581,6 +580,18 @@ namespace graphene { namespace app {
            start = node.operation_id;
      } catch(...) { return result; }

+      if(_app.is_plugin_enabled("elasticsearch")) {
+         auto es = _app.get_plugin("elasticsearch");
+         if(es.get()->get_running_mode() != elasticsearch::mode::only_save) {
+            if(!_app.elasticsearch_thread)
+               _app.elasticsearch_thread= std::make_shared("elasticsearch");
+
+            return _app.elasticsearch_thread->async([&es, &account, &stop, &limit, &start]() {
+               return es->get_account_history(account, stop, limit, start);
+            }, "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).wait();
+         }
+      }
+
       const auto& hist_idx = db.get_index_type();
       const auto& by_op_idx = hist_idx.indices().get();
       auto index_start = by_op_idx.begin();
diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp
index b5cd9ef12..ce2010179 100644
--- a/libraries/app/application.cpp
+++ b/libraries/app/application.cpp
@@ -922,7 +922,8 @@ namespace detail {
       std::shared_ptr _websocket_server;
       std::shared_ptr _websocket_tls_server;

-      std::map> _plugins;
+      std::map> _active_plugins;
+      std::map> _available_plugins;
       bool _is_finished_syncing = false;
    };
@@ -964,6 +965,7 @@ void application::set_program_options(boost::program_options::options_descriptio
         ("enable-standby-votes-tracking", bpo::value()->implicit_value(true), "Whether to enable tracking of votes of standby witnesses and committee members. 
" "Set it to true to provide accurate data to API clients, set to false for slightly better performance.") + ("plugins", bpo::value(), "Space-separated list of plugins to activate") ; command_line_options.add(configuration_file_options); command_line_options.add_options() @@ -1009,6 +1011,36 @@ void application::initialize(const fc::path& data_dir, const boost::program_opti std::exit(EXIT_SUCCESS); } + + std::set wanted; + if( options.count("plugins") ) + { + boost::split(wanted, options.at("plugins").as(), [](char c){return c == ' ';}); + } + else + { + wanted.insert("account_history"); + wanted.insert("market_history"); + wanted.insert("accounts_list"); + wanted.insert("affiliate_stats"); + } + wanted.insert("witness"); + wanted.insert("bookie"); + + int es_ah_conflict_counter = 0; + for (auto& it : wanted) + { + if(it == "account_history") + ++es_ah_conflict_counter; + if(it == "elasticsearch") + ++es_ah_conflict_counter; + + if(es_ah_conflict_counter > 1) { + elog("Can't start program with elasticsearch and account_history plugin at the same time"); + std::exit(EXIT_FAILURE); + } + if (!it.empty()) enable_plugin(it); + } } void application::startup() @@ -1026,7 +1058,12 @@ void application::startup() std::shared_ptr application::get_plugin(const string& name) const { - return my->_plugins[name]; + return my->_active_plugins[name]; +} + +bool application::is_plugin_enabled(const string& name) const +{ + return !(my->_active_plugins.find(name) == my->_active_plugins.end()); } net::node_ptr application::p2p_node() @@ -1059,14 +1096,21 @@ bool application::is_finished_syncing() const return my->_is_finished_syncing; } -void graphene::app::application::add_plugin(const string& name, std::shared_ptr p) +void graphene::app::application::enable_plugin(const string& name) +{ + FC_ASSERT(my->_available_plugins[name], "Unknown plugin '" + name + "'"); + my->_active_plugins[name] = my->_available_plugins[name]; + my->_active_plugins[name]->plugin_set_app(this); +} + +void graphene::app::application::add_available_plugin(std::shared_ptr p) { - my->_plugins[name] = p; + my->_available_plugins[p->plugin_name()] = p; } void application::shutdown_plugins() { - for( auto& entry : my->_plugins ) + for( auto& entry : my->_active_plugins ) entry.second->plugin_shutdown(); return; } @@ -1080,14 +1124,14 @@ void application::shutdown() void application::initialize_plugins( const boost::program_options::variables_map& options ) { - for( auto& entry : my->_plugins ) + for( auto& entry : my->_active_plugins ) entry.second->plugin_initialize( options ); return; } void application::startup_plugins() { - for( auto& entry : my->_plugins ) + for( auto& entry : my->_active_plugins ) entry.second->plugin_startup(); return; } diff --git a/libraries/app/impacted.cpp b/libraries/app/impacted.cpp deleted file mode 100644 index 08253417b..000000000 --- a/libraries/app/impacted.cpp +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. 
- * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include -#include - -namespace graphene { namespace app { - -using namespace fc; -using namespace graphene::chain; - -// TODO: Review all of these, especially no-ops -struct get_impacted_account_visitor -{ - flat_set& _impacted; - get_impacted_account_visitor( flat_set& impact ):_impacted(impact) {} - typedef void result_type; - - void operator()( const transfer_operation& op ) - { - _impacted.insert( op.to ); - } - - void operator()( const asset_claim_fees_operation& op ){} - void operator()( const limit_order_create_operation& op ) {} - void operator()( const limit_order_cancel_operation& op ) - { - _impacted.insert( op.fee_paying_account ); - } - void operator()( const call_order_update_operation& op ) {} - void operator()( const fill_order_operation& op ) - { - _impacted.insert( op.account_id ); - } - - void operator()( const account_create_operation& op ) - { - _impacted.insert( op.registrar ); - _impacted.insert( op.referrer ); - add_authority_accounts( _impacted, op.owner ); - add_authority_accounts( _impacted, op.active ); - } - - void operator()( const account_update_operation& op ) - { - _impacted.insert( op.account ); - if( op.owner ) - add_authority_accounts( _impacted, *(op.owner) ); - if( op.active ) - add_authority_accounts( _impacted, *(op.active) ); - } - - void operator()( const account_whitelist_operation& op ) - { - _impacted.insert( op.account_to_list ); - } - - void operator()( const account_upgrade_operation& op ) {} - void operator()( const account_transfer_operation& op ) - { - _impacted.insert( op.new_owner ); - } - - void operator()( const asset_create_operation& op ) {} - void operator()( const asset_update_operation& op ) - { - if( op.new_issuer ) - _impacted.insert( *(op.new_issuer) ); - } - - void operator()( const asset_update_bitasset_operation& op ) {} - void operator()( const asset_update_dividend_operation& op ) {} - void operator()( const asset_dividend_distribution_operation& op ) - { - _impacted.insert( op.account_id ); - } - - void operator()( const asset_update_feed_producers_operation& op ) {} - - void operator()( const asset_issue_operation& op ) - { - _impacted.insert( op.issue_to_account ); - } - - void operator()( const asset_reserve_operation& op ) {} - void operator()( const asset_fund_fee_pool_operation& op ) {} - void operator()( const asset_settle_operation& op ) {} - void operator()( const asset_global_settle_operation& op ) {} - void 
operator()( const asset_publish_feed_operation& op ) {} - void operator()( const witness_create_operation& op ) - { - _impacted.insert( op.witness_account ); - } - void operator()( const witness_update_operation& op ) - { - _impacted.insert( op.witness_account ); - } - - void operator()( const proposal_create_operation& op ) - { - vector other; - for( const auto& proposed_op : op.proposed_ops ) - operation_get_required_authorities( proposed_op.op, _impacted, _impacted, other ); - for( auto& o : other ) - add_authority_accounts( _impacted, o ); - } - - void operator()( const proposal_update_operation& op ) {} - void operator()( const proposal_delete_operation& op ) {} - - void operator()( const withdraw_permission_create_operation& op ) - { - _impacted.insert( op.authorized_account ); - } - - void operator()( const withdraw_permission_update_operation& op ) - { - _impacted.insert( op.authorized_account ); - } - - void operator()( const withdraw_permission_claim_operation& op ) - { - _impacted.insert( op.withdraw_from_account ); - } - - void operator()( const withdraw_permission_delete_operation& op ) - { - _impacted.insert( op.authorized_account ); - } - - void operator()( const committee_member_create_operation& op ) - { - _impacted.insert( op.committee_member_account ); - } - void operator()( const committee_member_update_operation& op ) - { - _impacted.insert( op.committee_member_account ); - } - void operator()( const committee_member_update_global_parameters_operation& op ) {} - - void operator()( const vesting_balance_create_operation& op ) - { - _impacted.insert( op.owner ); - } - - void operator()( const vesting_balance_withdraw_operation& op ) {} - void operator()( const worker_create_operation& op ) {} - void operator()( const custom_operation& op ) {} - void operator()( const assert_operation& op ) {} - void operator()( const balance_claim_operation& op ) {} - - void operator()( const override_transfer_operation& op ) - { - _impacted.insert( op.to ); - _impacted.insert( op.from ); - _impacted.insert( op.issuer ); - } - - void operator()( const transfer_to_blind_operation& op ) - { - _impacted.insert( op.from ); - for( const auto& out : op.outputs ) - add_authority_accounts( _impacted, out.owner ); - } - - void operator()( const blind_transfer_operation& op ) - { - for( const auto& in : op.inputs ) - add_authority_accounts( _impacted, in.owner ); - for( const auto& out : op.outputs ) - add_authority_accounts( _impacted, out.owner ); - } - - void operator()( const transfer_from_blind_operation& op ) - { - _impacted.insert( op.to ); - for( const auto& in : op.inputs ) - add_authority_accounts( _impacted, in.owner ); - } - - void operator()( const asset_settle_cancel_operation& op ) - { - _impacted.insert( op.account ); - } - - void operator()( const fba_distribute_operation& op ) - { - _impacted.insert( op.account_id ); - } - - void operator()( const sport_create_operation& op ) {} - void operator()( const sport_update_operation& op ) {} - void operator()( const sport_delete_operation& op ) {} - void operator()( const event_group_create_operation& op ) {} - void operator()( const event_group_update_operation& op ) {} - void operator()( const event_group_delete_operation& op ) {} - void operator()( const event_create_operation& op ) {} - void operator()( const event_update_operation& op ) {} - void operator()( const event_update_status_operation& op ) {} - void operator()( const betting_market_rules_create_operation& op ) {} - void operator()( const 
betting_market_rules_update_operation& op ) {} - void operator()( const betting_market_group_create_operation& op ) {} - void operator()( const betting_market_group_update_operation& op ) {} - void operator()( const betting_market_create_operation& op ) {} - void operator()( const betting_market_update_operation& op ) {} - void operator()( const betting_market_group_resolve_operation& op ) {} - void operator()( const betting_market_group_cancel_unmatched_bets_operation& op ) {} - - void operator()( const bet_place_operation& op ) - { - _impacted.insert( op.bettor_id ); - } - void operator()( const bet_cancel_operation& op ) - { - _impacted.insert( op.bettor_id ); - } - void operator()( const bet_canceled_operation& op ) - { - _impacted.insert( op.bettor_id ); - } - void operator()( const bet_adjusted_operation& op ) - { - _impacted.insert( op.bettor_id ); - } - void operator()( const bet_matched_operation& op ) - { - _impacted.insert( op.bettor_id ); - } - void operator()( const betting_market_group_resolved_operation& op ) - { - _impacted.insert( op.bettor_id ); - } - - void operator()( const tournament_create_operation& op ) - { - _impacted.insert( op.creator ); - _impacted.insert( op.options.whitelist.begin(), op.options.whitelist.end() ); - } - void operator()( const tournament_join_operation& op ) - { - _impacted.insert( op.payer_account_id ); - _impacted.insert( op.player_account_id ); - } - void operator()( const tournament_leave_operation& op ) - { - //if account canceling registration is not the player, it must be the payer - if (op.canceling_account_id != op.player_account_id) - _impacted.erase( op.canceling_account_id ); - _impacted.erase( op.player_account_id ); - } - void operator()( const game_move_operation& op ) - { - _impacted.insert( op.player_account_id ); - } - void operator()( const tournament_payout_operation& op ) - { - _impacted.insert( op.payout_account_id ); - } - void operator()( const affiliate_payout_operation& op ) - { - _impacted.insert( op.affiliate ); - } - void operator()( const affiliate_referral_payout_operation& op ) { } - void operator()( const lottery_asset_create_operation& op) { } - void operator()( const ticket_purchase_operation& op ) - { - _impacted.insert( op.buyer ); - } - void operator()( const lottery_reward_operation& op ) { - _impacted.insert( op.winner ); - } - void operator()( const lottery_end_operation& op ) { - for( auto participant : op.participants ) { - _impacted.insert(participant.first); - } - } - void operator()( const sweeps_vesting_claim_operation& op ) { - _impacted.insert( op.account ); - } -}; - -void operation_get_impacted_accounts( const operation& op, flat_set& result ) -{ - get_impacted_account_visitor vtor = get_impacted_account_visitor( result ); - op.visit( vtor ); -} - -void transaction_get_impacted_accounts( const transaction& tx, flat_set& result ) -{ - for( const auto& op : tx.operations ) - operation_get_impacted_accounts( op, result ); -} - -} } diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 9e468dca9..4adf73a3a 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -31,6 +31,8 @@ #include #include +#include + #include #include #include diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index 26ae78efd..a313e2f8c 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ 
-56,14 +56,15 @@ namespace graphene { namespace app {
             auto plug = std::make_shared();
             plug->plugin_set_app(this);
-            boost::program_options::options_description plugin_cli_options("Options for plugin " + plug->plugin_name()), plugin_cfg_options;
+            boost::program_options::options_description plugin_cli_options(plug->plugin_name() + " plugin. " + plug->plugin_description() + "\nOptions"), plugin_cfg_options;
+            //boost::program_options::options_description plugin_cli_options("Options for plugin " + plug->plugin_name()), plugin_cfg_options;
             plug->plugin_set_program_options(plugin_cli_options, plugin_cfg_options);
             if( !plugin_cli_options.options().empty() )
                _cli_options.add(plugin_cli_options);
             if( !plugin_cfg_options.options().empty() )
                _cfg_options.add(plugin_cfg_options);
-            add_plugin( plug->plugin_name(), plug );
+            add_available_plugin( plug );
             return plug;
          }
          std::shared_ptr get_plugin( const string& name )const;
@@ -88,8 +89,14 @@ namespace graphene { namespace app {
          /// Emitted when syncing finishes (is_finished_syncing will return true)
          boost::signals2::signal syncing_finished;
-      private:
-         void add_plugin( const string& name, std::shared_ptr p );
+         void enable_plugin( const string& name );
+
+         bool is_plugin_enabled(const string& name) const;
+
+         std::shared_ptr elasticsearch_thread;
+
+      private:
+         void add_available_plugin( std::shared_ptr p );
          std::shared_ptr my;
          boost::program_options::options_description _cli_options;
diff --git a/libraries/app/include/graphene/app/plugin.hpp b/libraries/app/include/graphene/app/plugin.hpp
index c242130b9..45336f677 100644
--- a/libraries/app/include/graphene/app/plugin.hpp
+++ b/libraries/app/include/graphene/app/plugin.hpp
@@ -35,6 +35,7 @@ class abstract_plugin
    public:
       virtual ~abstract_plugin(){}
       virtual std::string plugin_name()const = 0;
+      virtual std::string plugin_description()const = 0;
       /**
        * @brief Perform early startup routines and register plugin indexes, callbacks, etc.
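
The application.hpp changes above, together with the new --plugins option in application.cpp, split plugin handling into two maps: register_plugin() now only records a plugin as available (add_available_plugin), and enable_plugin() copies it into the active map that get_plugin() and is_plugin_enabled() consult; application::initialize() activates the plugins named on the command line plus a forced default set, and refuses to run elasticsearch and account_history together. Below is a minimal, self-contained sketch of that two-map scheme under simplified assumptions; toy_plugin and toy_application are illustrative stand-ins, not the actual graphene::app classes.

#include <cassert>
#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

// Stand-in for graphene::app::abstract_plugin: just a name and a description.
struct toy_plugin {
   std::string name;
   std::string description;
};

// Stand-in for graphene::app::application with the two plugin maps from this patch.
class toy_application {
   std::map<std::string, std::shared_ptr<toy_plugin>> _available_plugins; // everything compiled in
   std::map<std::string, std::shared_ptr<toy_plugin>> _active_plugins;    // subset actually running
public:
   // register_plugin<P>() would end up here: the plugin is known, but not yet running.
   void add_available_plugin(std::shared_ptr<toy_plugin> p) { _available_plugins[p->name] = p; }

   // Mirrors enable_plugin(): unknown names are rejected, known ones become active.
   void enable_plugin(const std::string& name) {
      auto it = _available_plugins.find(name);
      if (it == _available_plugins.end())
         throw std::runtime_error("Unknown plugin '" + name + "'");
      _active_plugins[name] = it->second;
   }

   bool is_plugin_enabled(const std::string& name) const {
      return _active_plugins.find(name) != _active_plugins.end();
   }
};

int main() {
   toy_application app;
   app.add_available_plugin(std::make_shared<toy_plugin>(toy_plugin{"elasticsearch", "history in ElasticSearch"}));
   app.add_available_plugin(std::make_shared<toy_plugin>(toy_plugin{"account_history", "history in chain state"}));

   // Roughly what initialize() does for: --plugins "elasticsearch"
   app.enable_plugin("elasticsearch");

   assert(app.is_plugin_enabled("elasticsearch"));
   assert(!app.is_plugin_enabled("account_history"));
   std::cout << "elasticsearch is active; account_history is only available\n";
   return 0;
}

Built this way, only plugins the operator explicitly activated are visible to callers, which is what lets the api.cpp change above route get_account_history to the elasticsearch plugin only when that plugin is actually running.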
@@ -100,6 +101,7 @@ class plugin : public abstract_plugin virtual ~plugin() override; virtual std::string plugin_name()const override; + virtual std::string plugin_description()const override; virtual void plugin_initialize( const boost::program_options::variables_map& options ) override; virtual void plugin_startup() override; virtual void plugin_shutdown() override; diff --git a/libraries/app/plugin.cpp b/libraries/app/plugin.cpp index 8568d3711..cae488a66 100644 --- a/libraries/app/plugin.cpp +++ b/libraries/app/plugin.cpp @@ -43,6 +43,11 @@ std::string plugin::plugin_name()const return ""; } +std::string plugin::plugin_description()const +{ + return ""; +} + void plugin::plugin_initialize( const boost::program_options::variables_map& options ) { return; diff --git a/libraries/chain/asset_object.cpp b/libraries/chain/asset_object.cpp index 88e5dfcab..82951d799 100644 --- a/libraries/chain/asset_object.cpp +++ b/libraries/chain/asset_object.cpp @@ -23,6 +23,8 @@ */ #include #include +#include +#include #include #include @@ -185,6 +187,41 @@ vector asset_object::get_holders( database& db ) const return holders; } +vector asset_object::get_ticket_ids( database& db ) const +{ + auto& asset_bal_idx = db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); + vector ids; + const auto range = asset_bal_idx.equal_range( boost::make_tuple( get_id() ) ); + + for( const account_balance_object& bal : boost::make_iterator_range( range.first, range.second ) ) + { + const auto& stats = bal.owner(db).statistics(db); + const account_transaction_history_object* ath = static_cast(&stats.most_recent_op(db)); + for( uint64_t balance = bal.balance.value; balance > 0;) + { + if(ath != nullptr) + { + const operation_history_object& oho = db.get( ath->operation_id ); + if( oho.op.which() == operation::tag::value && get_id() == oho.op.get().lottery) + { + uint64_t tickets_count = oho.op.get().tickets_to_buy; + ids.insert(ids.end(), tickets_count, oho.id.instance()); + balance -= tickets_count; + assert(balance >= 0); + } + + if( ath->next == account_transaction_history_id_type() ) + { + ath = nullptr; + break; + } + else ath = db.find(ath->next); + } + } + } + return ids; +} + void asset_object::distribute_benefactors_part( database& db ) { transaction_evaluation_state eval( &db ); @@ -206,6 +243,7 @@ map< account_id_type, vector< uint16_t > > asset_object::distribute_winners_part transaction_evaluation_state eval( &db ); auto holders = get_holders( db ); + vector ticket_ids = get_ticket_ids(db); FC_ASSERT( dynamic_data( db ).current_supply == holders.size() ); map > structurized_participants; for( account_id_type holder : holders ) @@ -234,6 +272,11 @@ map< account_id_type, vector< uint16_t > > asset_object::distribute_winners_part reward_op.lottery = get_id(); reward_op.is_benefactor_reward = false; reward_op.winner = holders[winner_num]; + if(db.head_block_time() > HARDFORK_5050_1_TIME && ticket_ids.size() >= winner_num) + { + const static_variant tkt_id = ticket_ids[winner_num]; + reward_op.winner_ticket_id = tkt_id; + } reward_op.win_percentage = tickets[c]; reward_op.amount = asset( jackpot * tickets[c] * ( 1. 
- sweeps_distribution_percentage / (double)GRAPHENE_100_PERCENT ) / GRAPHENE_100_PERCENT , db.get_balance(id).asset_id ); db.apply_operation(eval, reward_op); diff --git a/libraries/chain/db_block.cpp b/libraries/chain/db_block.cpp index 99503bfb7..15112b165 100644 --- a/libraries/chain/db_block.cpp +++ b/libraries/chain/db_block.cpp @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -133,82 +134,90 @@ bool database::push_block(const signed_block& new_block, uint32_t skip) bool database::_push_block(const signed_block& new_block) { try { uint32_t skip = get_node_properties().skip_flags; - if( !(skip&skip_fork_db) ) + const auto now = fc::time_point::now().sec_since_epoch(); + + if( _fork_db.head() && new_block.timestamp.sec_since_epoch() > now - 86400 ) { - /// TODO: if the block is greater than the head block and before the next maitenance interval // verify that the block signer is in the current set of active witnesses. + shared_ptr prev_block = _fork_db.fetch_block( new_block.previous ); + GRAPHENE_ASSERT( prev_block, unlinkable_block_exception, "block does not link to known chain" ); + if( prev_block->scheduled_witnesses && !(skip&(skip_witness_schedule_check|skip_witness_signature)) ) + verify_signing_witness( new_block, *prev_block ); + } + shared_ptr new_head = _fork_db.push_block(new_block); - shared_ptr new_head = _fork_db.push_block(new_block); - //If the head block from the longest chain does not build off of the current head, we need to switch forks. - if( new_head->data.previous != head_block_id() ) + //If the head block from the longest chain does not build off of the current head, we need to switch forks. + if( new_head->data.previous != head_block_id() ) + { + //If the newly pushed block is the same height as head, we get head back in new_head + //Only switch forks if new_head is actually higher than head + if( new_head->data.block_num() > head_block_num() ) { - //If the newly pushed block is the same height as head, we get head back in new_head - //Only switch forks if new_head is actually higher than head - if( new_head->data.block_num() > head_block_num() ) + wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) ); + auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id()); + + // pop blocks until we hit the forked block + while( head_block_id() != branches.second.back()->data.previous ) { - wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) ); - auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id()); - - // pop blocks until we hit the forked block - while( head_block_id() != branches.second.back()->data.previous ) - { - ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) ); - pop_block(); - } - - // push all blocks on the new fork - for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) - { - ilog( "pushing block from fork #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) ); - optional except; - try { - undo_database::session session = _undo_db.start_undo_session(); - apply_block( (*ritr)->data, skip ); - _block_id_to_block.store( (*ritr)->id, (*ritr)->data ); - session.commit(); - } - catch ( const fc::exception& e ) { except = e; } - if( except ) - { - wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) ); - // remove the rest of branches.first from the fork_db, those blocks are invalid - while( ritr != branches.first.rend() ) - { - ilog( "removing block from fork_db #${n} 
${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) ); - _fork_db.remove( (*ritr)->id ); - ++ritr; - } - _fork_db.set_head( branches.second.front() ); - - // pop all blocks from the bad fork - while( head_block_id() != branches.second.back()->data.previous ) - { - ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) ); - pop_block(); - } - - ilog( "Switching back to fork: ${id}", ("id",branches.second.front()->data.id()) ); - // restore all blocks from the good fork - for( auto ritr2 = branches.second.rbegin(); ritr2 != branches.second.rend(); ++ritr2 ) - { - ilog( "pushing block #${n} ${id}", ("n",(*ritr2)->data.block_num())("id",(*ritr2)->id) ); - auto session = _undo_db.start_undo_session(); - apply_block( (*ritr2)->data, skip ); - _block_id_to_block.store( (*ritr2)->id, (*ritr2)->data ); - session.commit(); - } - throw *except; - } - } - return true; + ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) ); + pop_block(); } - else return false; + + // push all blocks on the new fork + for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) + { + ilog( "pushing block from fork #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) ); + optional except; + try { + undo_database::session session = _undo_db.start_undo_session(); + apply_block( (*ritr)->data, skip ); + update_witnesses( **ritr ); + _block_id_to_block.store( (*ritr)->id, (*ritr)->data ); + session.commit(); + } + catch ( const fc::exception& e ) { except = e; } + if( except ) + { + wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) ); + // remove the rest of branches.first from the fork_db, those blocks are invalid + while( ritr != branches.first.rend() ) + { + ilog( "removing block from fork_db #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) ); + _fork_db.remove( (*ritr)->id ); + ++ritr; + } + _fork_db.set_head( branches.second.front() ); + + // pop all blocks from the bad fork + while( head_block_id() != branches.second.back()->data.previous ) + { + ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) ); + pop_block(); + } + + ilog( "Switching back to fork: ${id}", ("id",branches.second.front()->data.id()) ); + // restore all blocks from the good fork + for( auto ritr2 = branches.second.rbegin(); ritr2 != branches.second.rend(); ++ritr2 ) + { + ilog( "pushing block #${n} ${id}", ("n",(*ritr2)->data.block_num())("id",(*ritr2)->id) ); + auto session = _undo_db.start_undo_session(); + apply_block( (*ritr2)->data, skip ); + _block_id_to_block.store( (*ritr2)->id, (*ritr2)->data ); + session.commit(); + } + throw *except; + } + } + return true; } + else return false; } try { auto session = _undo_db.start_undo_session(); apply_block(new_block, skip); + if( new_block.timestamp.sec_since_epoch() > now - 86400 ) + update_witnesses( *new_head ); _block_id_to_block.store(new_block.id(), new_block); session.commit(); } catch ( const fc::exception& e ) { @@ -220,6 +229,73 @@ bool database::_push_block(const signed_block& new_block) return false; } FC_CAPTURE_AND_RETHROW( (new_block) ) } +void database::verify_signing_witness( const signed_block& new_block, const fork_item& fork_entry )const +{ + FC_ASSERT( new_block.timestamp >= fork_entry.next_block_time ); + uint32_t slot_num = ( new_block.timestamp - fork_entry.next_block_time ).to_seconds() / block_interval(); + const global_property_object& gpo = get_global_properties(); + + if 
(gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM) + { + uint64_t index = ( fork_entry.next_block_aslot + slot_num ) % fork_entry.scheduled_witnesses->size(); + const auto& scheduled_witness = (*fork_entry.scheduled_witnesses)[index]; + FC_ASSERT( new_block.witness == scheduled_witness.first, "Witness produced block at wrong time", + ("block witness",new_block.witness)("scheduled",scheduled_witness)("slot_num",slot_num) ); + FC_ASSERT( new_block.validate_signee( scheduled_witness.second ) ); + } + if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM && + slot_num != 0 ) + { + witness_id_type wid; + const witness_schedule_object& wso = get_witness_schedule_object(); + // ask the near scheduler who goes in the given slot + bool slot_is_near = wso.scheduler.get_slot(slot_num, wid); + if(! slot_is_near) + { + // if the near scheduler doesn't know, we have to extend it to + // a far scheduler. + // n.b. instantiating it is slow, but block gaps long enough to + // need it are likely pretty rare. + + witness_scheduler_rng far_rng(wso.rng_seed.begin(), GRAPHENE_FAR_SCHEDULE_CTR_IV); + + far_future_witness_scheduler far_scheduler = + far_future_witness_scheduler(wso.scheduler, far_rng); + if(!far_scheduler.get_slot(slot_num, wid)) + { + // no scheduled witness -- somebody set up us the bomb + // n.b. this code path is impossible, the present + // implementation of far_future_witness_scheduler + // returns true unconditionally + assert( false ); + } + } + + FC_ASSERT( new_block.witness == wid, "Witness produced block at wrong time", + ("block witness",new_block.witness)("scheduled",wid)("slot_num",slot_num) ); + FC_ASSERT( new_block.validate_signee( wid(*this).signing_key ) ); + } +} + +void database::update_witnesses( fork_item& fork_entry )const +{ + if( fork_entry.scheduled_witnesses ) return; + + const dynamic_global_property_object& dpo = get_dynamic_global_properties(); + fork_entry.next_block_aslot = dpo.current_aslot + 1; + fork_entry.next_block_time = get_slot_time( 1 ); + + const witness_schedule_object& wso = get_witness_schedule_object(); + fork_entry.scheduled_witnesses = std::make_shared< vector< pair< witness_id_type, public_key_type > > >(); + fork_entry.scheduled_witnesses->reserve( wso.current_shuffled_witnesses.size() ); + + for( size_t i = 0; i < wso.current_shuffled_witnesses.size(); ++i ) + { + const auto& witness = wso.current_shuffled_witnesses[i](*this); + fork_entry.scheduled_witnesses->emplace_back( wso.current_shuffled_witnesses[i], witness.signing_key ); + } +} + /** * Attempts to push the transaction into the pending queue * @@ -260,7 +336,7 @@ processed_transaction database::_push_transaction( const signed_transaction& trx temp_session.merge(); // notify anyone listening to pending transactions - on_pending_transaction( trx ); + notify_on_pending_transaction( trx ); return processed_trx; } @@ -594,7 +670,7 @@ void database::_apply_block( const signed_block& next_block ) apply_debug_updates(); // notify observers that the block has been applied - applied_block( next_block ); //emit + notify_applied_block( next_block ); //emit _applied_ops.clear(); notify_changed_objects(); diff --git a/libraries/chain/db_notify.cpp b/libraries/chain/db_notify.cpp index 3404989a8..e91eaa6b2 100644 --- a/libraries/chain/db_notify.cpp +++ b/libraries/chain/db_notify.cpp @@ -33,6 +33,14 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include + using namespace fc; using namespace 
graphene::chain; @@ -287,13 +295,13 @@ struct get_impacted_account_visitor } }; -void operation_get_impacted_accounts( const operation& op, flat_set& result ) +void graphene::chain::operation_get_impacted_accounts( const operation& op, flat_set& result ) { get_impacted_account_visitor vtor = get_impacted_account_visitor( result ); op.visit( vtor ); } -void transaction_get_impacted_accounts( const transaction& tx, flat_set& result ) +void graphene::chain::transaction_get_impacted_accounts( const transaction& tx, flat_set& result ) { for( const auto& op : tx.operations ) operation_get_impacted_accounts( op, result ); @@ -433,6 +441,16 @@ void get_relevant_accounts( const object* obj, flat_set& accoun namespace graphene { namespace chain { +void database::notify_applied_block( const signed_block& block ) +{ + GRAPHENE_TRY_NOTIFY( applied_block, block ) +} + +void database::notify_on_pending_transaction( const signed_transaction& tx ) +{ + GRAPHENE_TRY_NOTIFY( on_pending_transaction, tx ) +} + void database::notify_changed_objects() { try { if( _undo_db.enabled() ) @@ -452,7 +470,7 @@ void database::notify_changed_objects() get_relevant_accounts(obj, new_accounts_impacted); } - new_objects(new_ids, new_accounts_impacted); + GRAPHENE_TRY_NOTIFY( new_objects, new_ids, new_accounts_impacted) } // Changed @@ -466,7 +484,7 @@ void database::notify_changed_objects() get_relevant_accounts(item.second.get(), changed_accounts_impacted); } - changed_objects(changed_ids, changed_accounts_impacted); + GRAPHENE_TRY_NOTIFY( changed_objects, changed_ids, changed_accounts_impacted) } // Removed @@ -483,7 +501,7 @@ void database::notify_changed_objects() get_relevant_accounts(obj, removed_accounts_impacted); } - removed_objects(removed_ids, removed, removed_accounts_impacted); + GRAPHENE_TRY_NOTIFY( removed_objects, removed_ids, removed, removed_accounts_impacted) } } } FC_CAPTURE_AND_LOG( (0) ) } diff --git a/libraries/chain/db_witness_schedule.cpp b/libraries/chain/db_witness_schedule.cpp index 7a6bb219c..762157bce 100644 --- a/libraries/chain/db_witness_schedule.cpp +++ b/libraries/chain/db_witness_schedule.cpp @@ -45,7 +45,7 @@ witness_id_type database::get_scheduled_witness( uint32_t slot_num )const if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM && slot_num != 0 ) { - const witness_schedule_object& wso = get_witness_schedule_object();; + const witness_schedule_object& wso = get_witness_schedule_object(); // ask the near scheduler who goes in the given slot bool slot_is_near = wso.scheduler.get_slot(slot_num-1, wid); if(! 
slot_is_near)
diff --git a/libraries/chain/hardfork.d/5050-1.hf b/libraries/chain/hardfork.d/5050-1.hf
new file mode 100644
index 000000000..51c127329
--- /dev/null
+++ b/libraries/chain/hardfork.d/5050-1.hf
@@ -0,0 +1,4 @@
+// 5050_1 HARDFORK Wednesday, 22 April 2020 20:00:00 GMT
+#ifndef HARDFORK_5050_1_TIME
+#define HARDFORK_5050_1_TIME (fc::time_point_sec( 1587585600 ))
+#endif
diff --git a/libraries/chain/include/graphene/chain/asset_object.hpp b/libraries/chain/include/graphene/chain/asset_object.hpp
index 8978a6d15..d8c65e898 100644
--- a/libraries/chain/include/graphene/chain/asset_object.hpp
+++ b/libraries/chain/include/graphene/chain/asset_object.hpp
@@ -134,6 +134,7 @@ namespace graphene { namespace chain {
         optional lottery_options;
         time_point_sec get_lottery_expiration() const;
         vector get_holders( database& db ) const;
+        vector get_ticket_ids( database& db ) const;
         void distribute_benefactors_part( database& db );
         map< account_id_type, vector< uint16_t > > distribute_winners_part( database& db );
         void distribute_sweeps_holders_part( database& db );
diff --git a/libraries/chain/include/graphene/chain/config.hpp b/libraries/chain/include/graphene/chain/config.hpp
index 19aef159b..f82ed2e5c 100644
--- a/libraries/chain/include/graphene/chain/config.hpp
+++ b/libraries/chain/include/graphene/chain/config.hpp
@@ -43,7 +43,7 @@
 #define GRAPHENE_MIN_BLOCK_INTERVAL 1 /* seconds */
 #define GRAPHENE_MAX_BLOCK_INTERVAL 30 /* seconds */
-#define GRAPHENE_DEFAULT_BLOCK_INTERVAL 5 /* seconds */
+#define GRAPHENE_DEFAULT_BLOCK_INTERVAL 3 /* seconds */
 #define GRAPHENE_DEFAULT_MAX_TRANSACTION_SIZE 2048
 #define GRAPHENE_DEFAULT_MAX_BLOCK_SIZE (GRAPHENE_DEFAULT_MAX_TRANSACTION_SIZE*GRAPHENE_DEFAULT_BLOCK_INTERVAL*200000)
 #define GRAPHENE_DEFAULT_MAX_TIME_UNTIL_EXPIRATION (60*60*24) // seconds, aka: 1 day
@@ -151,7 +151,7 @@
 #define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4
 #define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3
-#define GRAPHENE_CURRENT_DB_VERSION "PPY2.3"
+#define GRAPHENE_CURRENT_DB_VERSION "PPY2.4"
 #define GRAPHENE_IRREVERSIBLE_THRESHOLD (70 * GRAPHENE_1_PERCENT)
@@ -179,7 +179,7 @@
 #define GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET (asset_id_type(743))
-#define GRAPHENE_DEFAULT_RAKE_FEE_PERCENTAGE (5*GRAPHENE_1_PERCENT)
+#define GRAPHENE_DEFAULT_RAKE_FEE_PERCENTAGE (3*GRAPHENE_1_PERCENT)
 /**
  * Betting-related constants.
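
Related to the db_block.cpp changes earlier in this diff: update_witnesses() caches the post-block witness schedule on each fork_item, and verify_signing_witness() then checks, for blocks with recent timestamps, that the producer matches the slot implied by the block time. The sketch below models only the shuffled-algorithm slot arithmetic, index = (next_block_aslot + slot_num) % scheduled_witnesses.size(); cached_schedule and expected_signer are illustrative names, the types are simplified stand-ins, and the real code additionally validates the block signature against the cached signing key.

#include <cstdint>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Stand-in for the new fork_item fields (next_block_aslot, next_block_time,
// scheduled_witnesses); witness ids and keys are simplified to integers and strings.
struct cached_schedule {
   uint64_t next_block_aslot = 0;
   uint32_t next_block_time = 0;                                      // seconds, like fc::time_point_sec
   std::vector<std::pair<uint32_t, std::string>> scheduled_witnesses; // (witness id, signing key)
};

// Which witness should have produced a block with this timestamp?
// Same arithmetic as the shuffled-algorithm branch of verify_signing_witness().
uint32_t expected_signer(const cached_schedule& s, uint32_t block_timestamp, uint32_t block_interval)
{
   if (block_timestamp < s.next_block_time || s.scheduled_witnesses.empty())
      throw std::runtime_error("block is earlier than the next scheduled slot");
   const uint32_t slot_num = (block_timestamp - s.next_block_time) / block_interval;
   const uint64_t index = (s.next_block_aslot + slot_num) % s.scheduled_witnesses.size();
   return s.scheduled_witnesses[index].first;  // the real code also checks the block signature
}

int main() {
   cached_schedule s;
   s.next_block_aslot = 41;
   s.next_block_time = 1587585600;
   s.scheduled_witnesses = { {7, "keyA"}, {9, "keyB"}, {12, "keyC"} };
   // A block two 3-second intervals after next_block_time lands in slot 2,
   // so index = (41 + 2) % 3 = 1 and witness 9 is expected.
   return expected_signer(s, s.next_block_time + 6, 3) == 9 ? 0 : 1;
}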
diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index aa26c0d4f..e8c8c3f42 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -467,6 +467,8 @@ namespace graphene { namespace chain { protected: //Mark pop_undo() as protected -- we do not want outside calling pop_undo(); it should call pop_block() instead void pop_undo() { object_database::pop_undo(); } + void notify_applied_block( const signed_block& block ); + void notify_on_pending_transaction( const signed_transaction& tx ); void notify_changed_objects(); private: @@ -492,6 +494,8 @@ namespace graphene { namespace chain { const witness_object& validate_block_header( uint32_t skip, const signed_block& next_block )const; const witness_object& _validate_block_header( const signed_block& next_block )const; + void verify_signing_witness( const signed_block& new_block, const fork_item& fork_entry )const; + void update_witnesses( fork_item& fork_entry )const; void create_block_summary(const signed_block& next_block); //////////////////// db_witness_schedule.cpp //////////////////// diff --git a/libraries/chain/include/graphene/chain/exceptions.hpp b/libraries/chain/include/graphene/chain/exceptions.hpp index 2e07ca26f..ee2640291 100644 --- a/libraries/chain/include/graphene/chain/exceptions.hpp +++ b/libraries/chain/include/graphene/chain/exceptions.hpp @@ -65,6 +65,21 @@ msg \ ) +#define GRAPHENE_TRY_NOTIFY( signal, ... ) \ + try \ + { \ + signal( __VA_ARGS__ ); \ + } \ + catch( const graphene::chain::plugin_exception& e ) \ + { \ + elog( "Caught plugin exception: ${e}", ("e", e.to_detail_string() ) ); \ + throw; \ + } \ + catch( ... ) \ + { \ + wlog( "Caught unexpected exception in plugin" ); \ + } + namespace graphene { namespace chain { FC_DECLARE_EXCEPTION( chain_exception, 3000000, "blockchain exception" ) @@ -77,6 +92,7 @@ namespace graphene { namespace chain { FC_DECLARE_DERIVED_EXCEPTION( undo_database_exception, graphene::chain::chain_exception, 3070000, "undo database exception" ) FC_DECLARE_DERIVED_EXCEPTION( unlinkable_block_exception, graphene::chain::chain_exception, 3080000, "unlinkable block" ) FC_DECLARE_DERIVED_EXCEPTION( black_swan_exception, graphene::chain::chain_exception, 3090000, "black swan" ) + FC_DECLARE_DERIVED_EXCEPTION( plugin_exception, graphene::chain::chain_exception, 3100000, "plugin exception" ) FC_DECLARE_DERIVED_EXCEPTION( tx_missing_active_auth, graphene::chain::transaction_exception, 3030001, "missing required active authority" ) FC_DECLARE_DERIVED_EXCEPTION( tx_missing_owner_auth, graphene::chain::transaction_exception, 3030002, "missing required owner authority" ) diff --git a/libraries/chain/include/graphene/chain/fork_database.hpp b/libraries/chain/include/graphene/chain/fork_database.hpp index 8ca95b5e4..4007ca09f 100644 --- a/libraries/chain/include/graphene/chain/fork_database.hpp +++ b/libraries/chain/include/graphene/chain/fork_database.hpp @@ -51,6 +51,11 @@ namespace graphene { namespace chain { bool invalid = false; block_id_type id; signed_block data; + + // contains witness block signing keys scheduled *after* the block has been applied + shared_ptr< vector< pair< witness_id_type, public_key_type > > > scheduled_witnesses; + uint64_t next_block_aslot = 0; + fc::time_point_sec next_block_time; }; typedef shared_ptr item_ptr; diff --git a/libraries/app/include/graphene/app/impacted.hpp b/libraries/chain/include/graphene/chain/impacted.hpp similarity 
index 96% rename from libraries/app/include/graphene/app/impacted.hpp rename to libraries/chain/include/graphene/chain/impacted.hpp index 2e59b9104..2a22cbd12 100644 --- a/libraries/app/include/graphene/app/impacted.hpp +++ b/libraries/chain/include/graphene/chain/impacted.hpp @@ -28,7 +28,7 @@ #include #include -namespace graphene { namespace app { +namespace graphene { namespace chain { void operation_get_impacted_accounts( const graphene::chain::operation& op, @@ -39,4 +39,4 @@ void transaction_get_impacted_accounts( fc::flat_set& result ); -} } // graphene::app +} } // graphene::app \ No newline at end of file diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index b35b171f9..891994727 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -102,6 +102,16 @@ namespace graphene { namespace chain { struct by_seq; struct by_op; struct by_opid; + +typedef multi_index_container< + operation_history_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > > + > +> operation_history_multi_index_type; + +typedef generic_index operation_history_index; + typedef multi_index_container< account_transaction_history_object, indexed_by< diff --git a/libraries/chain/include/graphene/chain/protocol/lottery_ops.hpp b/libraries/chain/include/graphene/chain/protocol/lottery_ops.hpp index 32d70a370..4f512bce9 100644 --- a/libraries/chain/include/graphene/chain/protocol/lottery_ops.hpp +++ b/libraries/chain/include/graphene/chain/protocol/lottery_ops.hpp @@ -52,6 +52,8 @@ namespace graphene { namespace chain { share_type calculate_fee( const fee_parameters_type& k )const; }; + typedef static_variant ticket_num; + /** * @ingroup operations */ @@ -73,7 +75,7 @@ namespace graphene { namespace chain { // true if recieved from benefators section of lottery; false otherwise bool is_benefactor_reward; - extensions_type extensions; + ticket_num winner_ticket_id; account_id_type fee_payer()const { return account_id_type(); } void validate()const {}; @@ -114,7 +116,7 @@ FC_REFLECT( graphene::chain::ticket_purchase_operation, ) FC_REFLECT( graphene::chain::ticket_purchase_operation::fee_parameters_type, (fee) ) - +FC_REFLECT_TYPENAME( graphene::chain::ticket_num ) FC_REFLECT( graphene::chain::lottery_reward_operation, (fee) (lottery) @@ -122,7 +124,7 @@ FC_REFLECT( graphene::chain::lottery_reward_operation, (amount) (win_percentage) (is_benefactor_reward) - (extensions) + (winner_ticket_id) ) FC_REFLECT( graphene::chain::lottery_reward_operation::fee_parameters_type, (fee) ) diff --git a/libraries/fc b/libraries/fc index 89cbe19f9..0358ca257 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 89cbe19f99b99853e5ff4b2cc4cea0273a2071f5 +Subproject commit 0358ca257e4ce9d66c1097cd2e8e2d34ff89a297 diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 5c5f40d50..61f1cef56 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -261,13 +261,13 @@ namespace graphene { namespace net fc::future accept_or_connect_task_done; firewall_check_state_data *firewall_check_state = nullptr; -#ifndef NDEBUG + private: +#ifndef NDEBUG fc::thread* _thread = nullptr; unsigned _send_message_queue_tasks_running = 0; // temporary debugging 
#endif bool _currently_handling_message = false; // true while we're in the middle of handling a message from the remote system - private: peer_connection(peer_connection_delegate* delegate); void destroy(); public: diff --git a/libraries/net/message_oriented_connection.cpp b/libraries/net/message_oriented_connection.cpp index 5dea08d4b..1bc1832ec 100644 --- a/libraries/net/message_oriented_connection.cpp +++ b/libraries/net/message_oriented_connection.cpp @@ -62,7 +62,8 @@ namespace graphene { namespace net { fc::time_point _last_message_received_time; fc::time_point _last_message_sent_time; - bool _send_message_in_progress; + std::atomic_bool _send_message_in_progress; + std::atomic_bool _read_loop_in_progress; #ifndef NDEBUG fc::thread* _thread; #endif @@ -98,7 +99,8 @@ namespace graphene { namespace net { _delegate(delegate), _bytes_received(0), _bytes_sent(0), - _send_message_in_progress(false) + _send_message_in_progress(false), + _read_loop_in_progress(false) #ifndef NDEBUG ,_thread(&fc::thread::current()) #endif @@ -138,6 +140,21 @@ namespace graphene { namespace net { _sock.bind(local_endpoint); } + class no_parallel_execution_guard final + { + std::atomic_bool* _flag; + public: + explicit no_parallel_execution_guard(std::atomic_bool* flag) : _flag(flag) + { + bool expected = false; + FC_ASSERT( flag->compare_exchange_strong( expected, true ), "Only one thread at time can visit it"); + } + ~no_parallel_execution_guard() + { + *_flag = false; + } + }; + void message_oriented_connection_impl::read_loop() { VERIFY_CORRECT_THREAD(); @@ -145,6 +162,7 @@ namespace graphene { namespace net { const int LEFTOVER = BUFFER_SIZE - sizeof(message_header); static_assert(BUFFER_SIZE >= sizeof(message_header), "insufficient buffer"); + no_parallel_execution_guard guard( &_read_loop_in_progress ); _connected_time = fc::time_point::now(); fc::oexception exception_to_rethrow; @@ -241,17 +259,7 @@ namespace graphene { namespace net { } send_message_scope_logger(remote_endpoint); #endif #endif - struct verify_no_send_in_progress { - bool& var; - verify_no_send_in_progress(bool& var) : var(var) - { - if (var) - elog("Error: two tasks are calling message_oriented_connection::send_message() at the same time"); - assert(!var); - var = true; - } - ~verify_no_send_in_progress() { var = false; } - } _verify_no_send_in_progress(_send_message_in_progress); + no_parallel_execution_guard guard( &_send_message_in_progress ); try { diff --git a/libraries/plugins/CMakeLists.txt b/libraries/plugins/CMakeLists.txt index 01079fe29..58728a9e5 100644 --- a/libraries/plugins/CMakeLists.txt +++ b/libraries/plugins/CMakeLists.txt @@ -2,6 +2,7 @@ add_subdirectory( witness ) add_subdirectory( account_history ) add_subdirectory( accounts_list ) add_subdirectory( affiliate_stats ) +add_subdirectory( elasticsearch ) add_subdirectory( market_history ) add_subdirectory( delayed_node ) add_subdirectory( bookie ) @@ -9,3 +10,4 @@ add_subdirectory( generate_genesis ) add_subdirectory( generate_uia_sharedrop_genesis ) add_subdirectory( debug_witness ) add_subdirectory( snapshot ) +add_subdirectory( es_objects ) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 81acb01ed..67322f800 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -24,7 +24,7 @@ #include -#include +#include #include #include @@ -128,8 +128,8 @@ void 
account_history_plugin_impl::update_account_histories( const signed_block& if( op.op.which() == operation::tag< account_create_operation >::value ) impacted.insert( op.result.get() ); else - graphene::app::operation_get_impacted_accounts( op.op, impacted ); - if( op.op.which() == operation::tag< lottery_end_operation >::value ) + graphene::chain::operation_get_impacted_accounts( op.op, impacted ); + if( op.op.which() == operation::tag< lottery_end_operation >::value ) { auto lop = op.op.get< lottery_end_operation >(); auto asset_object = lop.lottery( db ); @@ -137,6 +137,7 @@ void account_history_plugin_impl::update_account_histories( const signed_block& for( auto benefactor : asset_object.lottery_options->benefactors ) impacted.insert( benefactor.id ); } + for( auto& a : other ) for( auto& item : a.account_auths ) impacted.insert( item.first ); diff --git a/libraries/plugins/accounts_list/accounts_list_plugin.cpp b/libraries/plugins/accounts_list/accounts_list_plugin.cpp index aabf711d1..757891ea9 100644 --- a/libraries/plugins/accounts_list/accounts_list_plugin.cpp +++ b/libraries/plugins/accounts_list/accounts_list_plugin.cpp @@ -24,7 +24,7 @@ #include -#include +#include #include #include diff --git a/libraries/plugins/affiliate_stats/affiliate_stats_plugin.cpp b/libraries/plugins/affiliate_stats/affiliate_stats_plugin.cpp index 438b1acab..da9c8a04b 100644 --- a/libraries/plugins/affiliate_stats/affiliate_stats_plugin.cpp +++ b/libraries/plugins/affiliate_stats/affiliate_stats_plugin.cpp @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include diff --git a/libraries/plugins/bookie/bookie_plugin.cpp b/libraries/plugins/bookie/bookie_plugin.cpp index 02ce2cbac..a6c13c9de 100644 --- a/libraries/plugins/bookie/bookie_plugin.cpp +++ b/libraries/plugins/bookie/bookie_plugin.cpp @@ -24,7 +24,7 @@ #include #include -#include +#include #include #include diff --git a/libraries/plugins/delayed_node/delayed_node_plugin.cpp b/libraries/plugins/delayed_node/delayed_node_plugin.cpp index f9db2ccda..d49129b08 100644 --- a/libraries/plugins/delayed_node/delayed_node_plugin.cpp +++ b/libraries/plugins/delayed_node/delayed_node_plugin.cpp @@ -58,7 +58,7 @@ delayed_node_plugin::~delayed_node_plugin() void delayed_node_plugin::plugin_set_program_options(bpo::options_description& cli, bpo::options_description& cfg) { cli.add_options() - ("trusted-node", boost::program_options::value()->required(), "RPC endpoint of a trusted validating node (required)") + ("trusted-node", boost::program_options::value(), "RPC endpoint of a trusted validating node (required)") ; cfg.add(cli); } @@ -74,6 +74,7 @@ void delayed_node_plugin::connect() void delayed_node_plugin::plugin_initialize(const boost::program_options::variables_map& options) { + FC_ASSERT(options.count("trusted-node") > 0); my->remote_endpoint = "ws://" + options.at("trusted-node").as(); } diff --git a/libraries/plugins/elasticsearch/CMakeLists.txt b/libraries/plugins/elasticsearch/CMakeLists.txt new file mode 100644 index 000000000..f4815576d --- /dev/null +++ b/libraries/plugins/elasticsearch/CMakeLists.txt @@ -0,0 +1,23 @@ +file(GLOB HEADERS "include/graphene/elasticsearch/*.hpp") + +add_library( graphene_elasticsearch + elasticsearch_plugin.cpp + ) + +target_link_libraries( graphene_elasticsearch graphene_chain graphene_app curl ) +target_include_directories( graphene_elasticsearch + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +if(MSVC) + set_source_files_properties(elasticsearch_plugin.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) 
+endif(MSVC) + +install( TARGETS + graphene_elasticsearch + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/elasticsearch" ) + diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp new file mode 100644 index 000000000..484aef9cf --- /dev/null +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +namespace graphene { namespace elasticsearch { + +namespace detail +{ + +class elasticsearch_plugin_impl +{ + public: + elasticsearch_plugin_impl(elasticsearch_plugin& _plugin) + : _self( _plugin ) + { curl = curl_easy_init(); } + virtual ~elasticsearch_plugin_impl(); + + bool update_account_histories( const signed_block& b ); + + graphene::chain::database& database() + { + return _self.database(); + } + + elasticsearch_plugin& _self; + primary_index< operation_history_index >* _oho_index; + + std::string _elasticsearch_node_url = "http://localhost:9200/"; + uint32_t _elasticsearch_bulk_replay = 10000; + uint32_t _elasticsearch_bulk_sync = 100; + bool _elasticsearch_visitor = false; + std::string _elasticsearch_basic_auth = ""; + std::string _elasticsearch_index_prefix = "peerplays-"; + bool _elasticsearch_operation_object = false; + uint32_t _elasticsearch_start_es_after_block = 0; + bool _elasticsearch_operation_string = true; + mode _elasticsearch_mode = mode::only_save; + CURL *curl; // curl handler + vector bulk_lines; // vector of op lines + vector prepare; + + graphene::utilities::ES es; + uint32_t limit_documents; + int16_t op_type; + operation_history_struct os; + block_struct bs; + visitor_struct vs; + bulk_struct bulk_line_struct; + std::string bulk_line; + std::string index_name; + bool is_sync = false; + fc::time_point last_sync; + private: + bool add_elasticsearch( const account_id_type account_id, const optional& oho, const uint32_t block_number ); + const account_transaction_history_object& addNewEntry(const account_statistics_object& stats_obj, + const account_id_type account_id, + const optional & oho); + const account_statistics_object& getStatsObject(const account_id_type account_id); + void growStats(const account_statistics_object& stats_obj, const 
account_transaction_history_object& ath); + void getOperationType(const optional & oho); + void doOperationHistory(const optional & oho); + void doBlock(const optional & oho, const signed_block& b); + void doVisitor(const optional & oho); + void checkState(const fc::time_point_sec& block_time); + void cleanObjects(const account_transaction_history_object& ath, account_id_type account_id); + void createBulkLine(const account_transaction_history_object& ath); + void prepareBulk(const account_transaction_history_id_type& ath_id); + void populateESstruct(); +}; + +elasticsearch_plugin_impl::~elasticsearch_plugin_impl() +{ + if (curl) { + curl_easy_cleanup(curl); + curl = nullptr; + } + return; +} + +bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b ) +{ + checkState(b.timestamp); + index_name = graphene::utilities::generateIndexName(b.timestamp, _elasticsearch_index_prefix); + + graphene::chain::database& db = database(); + const vector >& hist = db.get_applied_operations(); + bool is_first = true; + auto skip_oho_id = [&is_first,&db,this]() { + if( is_first && db._undo_db.enabled() ) // this ensures that the current id is rolled back on undo + { + db.remove( db.create( []( operation_history_object& obj) {} ) ); + is_first = false; + } + else + _oho_index->use_next_id(); + }; + for( const optional< operation_history_object >& o_op : hist ) { + optional oho; + + auto create_oho = [&]() { + is_first = false; + return optional( + db.create([&](operation_history_object &h) { + if (o_op.valid()) + { + h.op = o_op->op; + h.result = o_op->result; + h.block_num = o_op->block_num; + h.trx_in_block = o_op->trx_in_block; + h.op_in_trx = o_op->op_in_trx; + h.virtual_op = o_op->virtual_op; + } + })); + }; + + if( !o_op.valid() ) { + skip_oho_id(); + continue; + } + oho = create_oho(); + + // populate what we can before impacted loop + getOperationType(oho); + doOperationHistory(oho); + doBlock(oho, b); + if(_elasticsearch_visitor) + doVisitor(oho); + + const operation_history_object& op = *o_op; + + // get the set of accounts this operation applies to + flat_set impacted; + vector other; + operation_get_required_authorities( op.op, impacted, impacted, other ); // fee_payer is added here + + if( op.op.which() == operation::tag< account_create_operation >::value ) + impacted.insert( op.result.get() ); + else + graphene::chain::operation_get_impacted_accounts( op.op, impacted ); + + for( auto& a : other ) + for( auto& item : a.account_auths ) + impacted.insert( item.first ); + + for( auto& account_id : impacted ) + { + if(!add_elasticsearch( account_id, oho, b.block_num() )) + return false; + } + } + // we send bulk at end of block when we are in sync for better real time client experience + if(is_sync) + { + populateESstruct(); + if(es.bulk_lines.size() > 0) + { + prepare.clear(); + if(!graphene::utilities::SendBulk(es)) + return false; + else + bulk_lines.clear(); + } + } + + return true; +} + +void elasticsearch_plugin_impl::checkState(const fc::time_point_sec& block_time) +{ + fc::time_point current_time(fc::time_point::now()); + if(((current_time - block_time) < fc::seconds(30)) || (current_time - last_sync > fc::seconds(60))) + { + limit_documents = _elasticsearch_bulk_sync; + is_sync = true; + last_sync = current_time; + } + else + { + limit_documents = _elasticsearch_bulk_replay; + is_sync = false; + } +} + +void elasticsearch_plugin_impl::getOperationType(const optional & oho) +{ + if (!oho->id.is_null()) + op_type = oho->op.which(); +} + +void 
elasticsearch_plugin_impl::doOperationHistory(const optional & oho) +{ + os.trx_in_block = oho->trx_in_block; + os.op_in_trx = oho->op_in_trx; + os.operation_result = fc::json::to_string(oho->result); + os.virtual_op = oho->virtual_op; + + if(_elasticsearch_operation_object) { + oho->op.visit(fc::from_static_variant(os.op_object, FC_PACK_MAX_DEPTH)); + adaptor_struct adaptor; + os.op_object = adaptor.adapt(os.op_object.get_object()); + } + if(_elasticsearch_operation_string) + os.op = fc::json::to_string(oho->op); +} + +void elasticsearch_plugin_impl::doBlock(const optional & oho, const signed_block& b) +{ + std::string trx_id = ""; + if(oho->trx_in_block < b.transactions.size()) + trx_id = b.transactions[oho->trx_in_block].id().str(); + bs.block_num = b.block_num(); + bs.block_time = b.timestamp; + bs.trx_id = trx_id; +} + +void elasticsearch_plugin_impl::doVisitor(const optional & oho) +{ + operation_visitor o_v; + oho->op.visit(o_v); + + vs.fee_data.asset = o_v.fee_asset; + vs.fee_data.amount = o_v.fee_amount; + + vs.transfer_data.asset = o_v.transfer_asset_id; + vs.transfer_data.amount = o_v.transfer_amount; + vs.transfer_data.from = o_v.transfer_from; + vs.transfer_data.to = o_v.transfer_to; + + vs.fill_data.order_id = o_v.fill_order_id; + vs.fill_data.account_id = o_v.fill_account_id; + vs.fill_data.pays_asset_id = o_v.fill_pays_asset_id; + vs.fill_data.pays_amount = o_v.fill_pays_amount; + vs.fill_data.receives_asset_id = o_v.fill_receives_asset_id; + vs.fill_data.receives_amount = o_v.fill_receives_amount; + //vs.fill_data.fill_price = o_v.fill_fill_price; + //vs.fill_data.is_maker = o_v.fill_is_maker; +} + +bool elasticsearch_plugin_impl::add_elasticsearch( const account_id_type account_id, + const optional & oho, + const uint32_t block_number) +{ + const auto &stats_obj = getStatsObject(account_id); + const auto &ath = addNewEntry(stats_obj, account_id, oho); + growStats(stats_obj, ath); + if(block_number > _elasticsearch_start_es_after_block) { + createBulkLine(ath); + prepareBulk(ath.id); + } + cleanObjects(ath, account_id); + + if (curl && bulk_lines.size() >= limit_documents) { // we are in bulk time, ready to add data to elasticsearech + prepare.clear(); + populateESstruct(); + if(!graphene::utilities::SendBulk(es)) + return false; + else + bulk_lines.clear(); + } + + return true; +} + +const account_statistics_object& elasticsearch_plugin_impl::getStatsObject(const account_id_type account_id) +{ + graphene::chain::database& db = database(); + const auto &acct = db.get(account_id); + return acct.statistics(db); +} + +const account_transaction_history_object& elasticsearch_plugin_impl::addNewEntry(const account_statistics_object& stats_obj, + const account_id_type account_id, + const optional & oho) +{ + graphene::chain::database& db = database(); + const auto &ath = db.create([&](account_transaction_history_object &obj) { + obj.operation_id = oho->id; + obj.account = account_id; + obj.sequence = stats_obj.total_ops + 1; + obj.next = stats_obj.most_recent_op; + }); + + return ath; +} + +void elasticsearch_plugin_impl::growStats(const account_statistics_object& stats_obj, + const account_transaction_history_object& ath) +{ + graphene::chain::database& db = database(); + db.modify(stats_obj, [&](account_statistics_object &obj) { + obj.most_recent_op = ath.id; + obj.total_ops = ath.sequence; + }); +} + +void elasticsearch_plugin_impl::createBulkLine(const account_transaction_history_object& ath) +{ + bulk_line_struct.account_history = ath; + bulk_line_struct.operation_history = 
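/* operation data assembled earlier by doOperationHistory() */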
os; + bulk_line_struct.operation_type = op_type; + bulk_line_struct.operation_id_num = ath.operation_id.instance.value; + bulk_line_struct.block_data = bs; + if(_elasticsearch_visitor) + bulk_line_struct.additional_data = vs; + bulk_line = fc::json::to_string(bulk_line_struct); +} + +void elasticsearch_plugin_impl::prepareBulk(const account_transaction_history_id_type& ath_id) +{ + const std::string _id = fc::json::to_string(ath_id); + fc::mutable_variant_object bulk_header; + bulk_header["_index"] = index_name; + bulk_header["_type"] = "data"; + bulk_header["_id"] = fc::to_string(ath_id.space_id) + "." + fc::to_string(ath_id.type_id) + "." + ath_id.instance; + prepare = graphene::utilities::createBulk(bulk_header, bulk_line); + bulk_lines.insert(bulk_lines.end(), prepare.begin(), prepare.end()); +} + +void elasticsearch_plugin_impl::cleanObjects(const account_transaction_history_object& ath, account_id_type account_id) +{ + graphene::chain::database& db = database(); + // remove everything except current object from ath + const auto &his_idx = db.get_index_type(); + const auto &by_seq_idx = his_idx.indices().get(); + auto itr = by_seq_idx.lower_bound(boost::make_tuple(account_id, 0)); + if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath.id) { + // if found, remove the entry + const auto remove_op_id = itr->operation_id; + const auto itr_remove = itr; + ++itr; + db.remove( *itr_remove ); + // modify previous node's next pointer + // this should be always true, but just have a check here + if( itr != by_seq_idx.end() && itr->account == account_id ) + { + db.modify( *itr, [&]( account_transaction_history_object& obj ){ + obj.next = account_transaction_history_id_type(); + }); + } + // do the same on oho + const auto &by_opid_idx = his_idx.indices().get(); + if (by_opid_idx.find(remove_op_id) == by_opid_idx.end()) { + db.remove(remove_op_id(db)); + } + } +} + +void elasticsearch_plugin_impl::populateESstruct() +{ + es.curl = curl; + es.bulk_lines = bulk_lines; + es.elasticsearch_url = _elasticsearch_node_url; + es.auth = _elasticsearch_basic_auth; +} + +} // end namespace detail + +elasticsearch_plugin::elasticsearch_plugin() : + my( new detail::elasticsearch_plugin_impl(*this) ) +{ +} + +elasticsearch_plugin::~elasticsearch_plugin() +{ +} + +std::string elasticsearch_plugin::plugin_name()const +{ + return "elasticsearch"; +} +std::string elasticsearch_plugin::plugin_description()const +{ + return "Stores account history data in elasticsearch database(EXPERIMENTAL)."; +} + +void elasticsearch_plugin::plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg + ) +{ + cli.add_options() + ("elasticsearch-node-url", boost::program_options::value(), + "Elastic Search database node url(http://localhost:9200/)") + ("elasticsearch-bulk-replay", boost::program_options::value(), + "Number of bulk documents to index on replay(10000)") + ("elasticsearch-bulk-sync", boost::program_options::value(), + "Number of bulk documents to index on a syncronied chain(100)") + ("elasticsearch-visitor", boost::program_options::value(), + "Use visitor to index additional data(slows down the replay(false))") + ("elasticsearch-basic-auth", boost::program_options::value(), + "Pass basic auth to elasticsearch database('')") + ("elasticsearch-index-prefix", boost::program_options::value(), + "Add a prefix to the index(peerplays-)") + ("elasticsearch-operation-object", boost::program_options::value(), + "Save operation as 
object(false)") + ("elasticsearch-start-es-after-block", boost::program_options::value(), + "Start doing ES job after block(0)") + ("elasticsearch-operation-string", boost::program_options::value(), + "Save operation as string. Needed to serve history api calls(true)") + ("elasticsearch-mode", boost::program_options::value(), + "Mode of operation: only_save(0), only_query(1), all(2) - Default: 0") + ; + cfg.add(cli); +} + +void elasticsearch_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ + my->_oho_index = database().add_index< primary_index< operation_history_index > >(); + database().add_index< primary_index< account_transaction_history_index > >(); + + if (options.count("elasticsearch-node-url")) { + my->_elasticsearch_node_url = options["elasticsearch-node-url"].as(); + } + if (options.count("elasticsearch-bulk-replay")) { + my->_elasticsearch_bulk_replay = options["elasticsearch-bulk-replay"].as(); + } + if (options.count("elasticsearch-bulk-sync")) { + my->_elasticsearch_bulk_sync = options["elasticsearch-bulk-sync"].as(); + } + if (options.count("elasticsearch-visitor")) { + my->_elasticsearch_visitor = options["elasticsearch-visitor"].as(); + } + if (options.count("elasticsearch-basic-auth")) { + my->_elasticsearch_basic_auth = options["elasticsearch-basic-auth"].as(); + } + if (options.count("elasticsearch-index-prefix")) { + my->_elasticsearch_index_prefix = options["elasticsearch-index-prefix"].as(); + } + if (options.count("elasticsearch-operation-object")) { + my->_elasticsearch_operation_object = options["elasticsearch-operation-object"].as(); + } + if (options.count("elasticsearch-start-es-after-block")) { + my->_elasticsearch_start_es_after_block = options["elasticsearch-start-es-after-block"].as(); + } + if (options.count("elasticsearch-operation-string")) { + my->_elasticsearch_operation_string = options["elasticsearch-operation-string"].as(); + } + if (options.count("elasticsearch-mode")) { + const auto option_number = options["elasticsearch-mode"].as(); + if(option_number > mode::all) + FC_THROW_EXCEPTION(fc::exception, "Elasticsearch mode not valid"); + my->_elasticsearch_mode = static_cast(options["elasticsearch-mode"].as()); + } + + if(my->_elasticsearch_mode != mode::only_query) { + if (my->_elasticsearch_mode == mode::all && !my->_elasticsearch_operation_string) + FC_THROW_EXCEPTION(fc::exception, + "If elasticsearch-mode is set to all then elasticsearch-operation-string need to be true"); + + database().applied_block.connect([this](const signed_block &b) { + if (!my->update_account_histories(b)) + FC_THROW_EXCEPTION(fc::exception, + "Error populating ES database, we are going to keep trying."); + }); + } +} + +void elasticsearch_plugin::plugin_startup() +{ + graphene::utilities::ES es; + es.curl = my->curl; + es.elasticsearch_url = my->_elasticsearch_node_url; + es.auth = my->_elasticsearch_basic_auth; + + if(!graphene::utilities::checkES(es)) + FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_elasticsearch_node_url)); + ilog("elasticsearch ACCOUNT HISTORY: plugin_startup() begin"); +} + +operation_history_object elasticsearch_plugin::get_operation_by_id(operation_history_id_type id) +{ + const string operation_id_string = std::string(object_id_type(id)); + + const string query = R"( + { + "query": { + "match": + { + "account_history.operation_id": )" + operation_id_string + R"(" + } + } + } + )"; + + auto es = prepareHistoryQuery(query); + const auto response = 
graphene::utilities::simpleQuery(es); + variant variant_response = fc::json::from_string(response); + const auto source = variant_response["hits"]["hits"][size_t(0)]["_source"]; + return fromEStoOperation(source); +} + +vector elasticsearch_plugin::get_account_history( + const account_id_type account_id, + operation_history_id_type stop = operation_history_id_type(), + unsigned limit = 100, + operation_history_id_type start = operation_history_id_type()) +{ + const string account_id_string = std::string(object_id_type(account_id)); + + const auto stop_number = stop.instance.value; + const auto start_number = start.instance.value; + + string range = ""; + if(stop_number == 0) + range = " AND operation_id_num: ["+fc::to_string(stop_number)+" TO "+fc::to_string(start_number)+"]"; + else if(stop_number > 0) + range = " AND operation_id_num: {"+fc::to_string(stop_number)+" TO "+fc::to_string(start_number)+"]"; + + const string query = R"( + { + "size": )" + fc::to_string(limit) + R"(, + "sort" : [{ "operation_id_num" : {"order" : "desc"}}], + "query": { + "bool": { + "must": [ + { + "query_string": { + "query": "account_history.account: )" + account_id_string + range + R"(" + } + } + ] + } + } + } + )"; + + auto es = prepareHistoryQuery(query); + + vector result; + + if(!graphene::utilities::checkES(es)) + return result; + + const auto response = graphene::utilities::simpleQuery(es); + variant variant_response = fc::json::from_string(response); + + const auto hits = variant_response["hits"]["total"]["value"]; + uint32_t size; + if( hits.is_object() ) // ES-7 ? + size = static_cast(hits["value"].as_uint64()); + else // probably ES-6 + size = static_cast(hits.as_uint64()); + + size = std::min( size, limit ); + + for(unsigned i=0; i_elasticsearch_node_url; + es.index_prefix = my->_elasticsearch_index_prefix; + es.endpoint = es.index_prefix + "*/data/_search"; + es.query = query; + + return es; +} + +mode elasticsearch_plugin::get_running_mode() +{ + return my->_elasticsearch_mode; +} + + +} } diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp new file mode 100644 index 000000000..01a832448 --- /dev/null +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once + +#include +#include +#include +#include + +namespace graphene { namespace elasticsearch { + using namespace chain; + +// +// Plugins should #define their SPACE_ID's so plugins with +// conflicting SPACE_ID assignments can be compiled into the +// same binary (by simply re-assigning some of the conflicting #defined +// SPACE_ID's in a build script). +// +// Assignment of SPACE_ID's cannot be done at run-time because +// various template automagic depends on them being known at compile +// time. +// +#ifndef ELASTICSEARCH_SPACE_ID +#define ELASTICSEARCH_SPACE_ID 6 +#endif + +namespace detail +{ + class elasticsearch_plugin_impl; +} + +enum mode { only_save = 0 , only_query = 1, all = 2 }; + +class elasticsearch_plugin : public graphene::app::plugin +{ + public: + elasticsearch_plugin(); + virtual ~elasticsearch_plugin(); + + std::string plugin_name()const override; + std::string plugin_description()const override; + virtual void plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg) override; + virtual void plugin_initialize(const boost::program_options::variables_map& options) override; + virtual void plugin_startup() override; + + operation_history_object get_operation_by_id(operation_history_id_type id); + vector get_account_history(const account_id_type account_id, + operation_history_id_type stop, unsigned limit, operation_history_id_type start); + mode get_running_mode(); + + friend class detail::elasticsearch_plugin_impl; + std::unique_ptr my; + + private: + operation_history_object fromEStoOperation(variant source); + graphene::utilities::ES prepareHistoryQuery(string query); +}; + + +struct operation_visitor +{ + typedef void result_type; + + share_type fee_amount; + asset_id_type fee_asset; + + asset_id_type transfer_asset_id; + share_type transfer_amount; + account_id_type transfer_from; + account_id_type transfer_to; + + void operator()( const graphene::chain::transfer_operation& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + + transfer_asset_id = o.amount.asset_id; + transfer_amount = o.amount.amount; + transfer_from = o.from; + transfer_to = o.to; + } + + object_id_type fill_order_id; + account_id_type fill_account_id; + asset_id_type fill_pays_asset_id; + share_type fill_pays_amount; + asset_id_type fill_receives_asset_id; + share_type fill_receives_amount; + //double fill_fill_price; + //bool fill_is_maker; + + void operator()( const graphene::chain::fill_order_operation& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + + fill_order_id = o.order_id; + fill_account_id = o.account_id; + fill_pays_asset_id = o.pays.asset_id; + fill_pays_amount = o.pays.amount; + fill_receives_asset_id = o.receives.asset_id; + fill_receives_amount = o.receives.amount; + //fill_fill_price = o.fill_price.to_real(); + //fill_is_maker = o.is_maker; + } + + template + void operator()( const T& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + } +}; + +struct operation_history_struct { + int trx_in_block; + int op_in_trx; + std::string operation_result; + int virtual_op; + std::string op; + variant op_object; +}; + +struct block_struct { + int block_num; + fc::time_point_sec 
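/* timestamp of the block that contains the op */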
block_time; + std::string trx_id; +}; + +struct fee_struct { + asset_id_type asset; + share_type amount; +}; + +struct transfer_struct { + asset_id_type asset; + share_type amount; + account_id_type from; + account_id_type to; +}; + +struct fill_struct { + object_id_type order_id; + account_id_type account_id; + asset_id_type pays_asset_id; + share_type pays_amount; + asset_id_type receives_asset_id; + share_type receives_amount; + double fill_price; + bool is_maker; +}; + +struct visitor_struct { + fee_struct fee_data; + transfer_struct transfer_data; + fill_struct fill_data; +}; + +struct bulk_struct { + account_transaction_history_object account_history; + operation_history_struct operation_history; + int operation_type; + int operation_id_num; + block_struct block_data; + optional additional_data; +}; + +struct adaptor_struct { + variant adapt(const variant_object& op) + { + fc::mutable_variant_object o(op); + vector keys_to_rename; + for (auto i = o.begin(); i != o.end(); ++i) + { + auto& element = (*i).value(); + if (element.is_object()) + { + const string& name = (*i).key(); + auto& vo = element.get_object(); + if (vo.contains(name.c_str())) + keys_to_rename.emplace_back(name); + element = adapt(vo); + } + else if (element.is_array()) + adapt(element.get_array()); + } + for (const auto& i : keys_to_rename) + { + string new_name = i + "_"; + o[new_name] = variant(o[i]); + o.erase(i); + } + + if (o.find("memo") != o.end()) + { + auto& memo = o["memo"]; + if (memo.is_string()) + { + o["memo_"] = o["memo"]; + o.erase("memo"); + } + else if (memo.is_object()) + { + fc::mutable_variant_object tmp(memo.get_object()); + if (tmp.find("nonce") != tmp.end()) + { + tmp["nonce"] = tmp["nonce"].as_string(); + o["memo"] = tmp; + } + } + } + if (o.find("new_parameters") != o.end()) + { + auto& tmp = o["new_parameters"]; + if (tmp.is_object()) + { + fc::mutable_variant_object tmp2(tmp.get_object()); + if (tmp2.find("current_fees") != tmp2.end()) + { + tmp2.erase("current_fees"); + o["new_parameters"] = tmp2; + } + } + } + if (o.find("owner") != o.end() && o["owner"].is_string()) + { + o["owner_"] = o["owner"].as_string(); + o.erase("owner"); + } + if (o.find("proposed_ops") != o.end()) + { + o["proposed_ops"] = fc::json::to_string(o["proposed_ops"]); + } + if (o.find("initializer") != o.end()) + { + o["initializer"] = fc::json::to_string(o["initializer"]); + } + + variant v; + fc::to_variant(o, v, FC_PACK_MAX_DEPTH); + return v; + } + + void adapt(fc::variants& v) + { + for (auto& array_element : v) + { + if (array_element.is_object()) + array_element = adapt(array_element.get_object()); + else if (array_element.is_array()) + adapt(array_element.get_array()); + else + array_element = array_element.as_string(); + } + } +}; +} } //graphene::elasticsearch + +FC_REFLECT_ENUM( graphene::elasticsearch::mode, (only_save)(only_query)(all) ) +FC_REFLECT( graphene::elasticsearch::operation_history_struct, (trx_in_block)(op_in_trx)(operation_result)(virtual_op)(op)(op_object) ) +FC_REFLECT( graphene::elasticsearch::block_struct, (block_num)(block_time)(trx_id) ) +FC_REFLECT( graphene::elasticsearch::fee_struct, (asset)(amount) ) +FC_REFLECT( graphene::elasticsearch::transfer_struct, (asset)(amount)(from)(to) ) +FC_REFLECT( graphene::elasticsearch::fill_struct, (order_id)(account_id)(pays_asset_id)(pays_amount)(receives_asset_id)(receives_amount)(fill_price)(is_maker)) +FC_REFLECT( graphene::elasticsearch::visitor_struct, (fee_data)(transfer_data)(fill_data) ) +FC_REFLECT( graphene::elasticsearch::bulk_struct, 
(account_history)(operation_history)(operation_type)(operation_id_num)(block_data)(additional_data) ) diff --git a/libraries/plugins/es_objects/CMakeLists.txt b/libraries/plugins/es_objects/CMakeLists.txt new file mode 100644 index 000000000..92e3d1502 --- /dev/null +++ b/libraries/plugins/es_objects/CMakeLists.txt @@ -0,0 +1,23 @@ +file(GLOB HEADERS "include/graphene/es_objects/*.hpp") + +add_library( graphene_es_objects + es_objects.cpp + ) + +target_link_libraries( graphene_es_objects graphene_chain graphene_app curl ) +target_include_directories( graphene_es_objects + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +if(MSVC) + set_source_files_properties(es_objects.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) +endif(MSVC) + +install( TARGETS + graphene_es_objects + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/es_objects" ) + diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp new file mode 100644 index 000000000..b9083cbc5 --- /dev/null +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace graphene { namespace es_objects { + +namespace detail +{ + +class es_objects_plugin_impl +{ + public: + es_objects_plugin_impl(es_objects_plugin& _plugin) + : _self( _plugin ) + { curl = curl_easy_init(); } + virtual ~es_objects_plugin_impl(); + + bool index_database(const vector& ids, std::string action); + bool genesis(); + void remove_from_database(object_id_type id, std::string index); + + es_objects_plugin& _self; + std::string _es_objects_elasticsearch_url = "http://localhost:9200/"; + std::string _es_objects_auth = ""; + uint32_t _es_objects_bulk_replay = 10000; + uint32_t _es_objects_bulk_sync = 100; + bool _es_objects_proposals = true; + bool _es_objects_accounts = true; + bool _es_objects_assets = true; + bool _es_objects_balances = true; + bool _es_objects_limit_orders = true; + bool _es_objects_asset_bitasset = true; + std::string _es_objects_index_prefix = "ppobjects-"; + uint32_t _es_objects_start_es_after_block = 0; + CURL *curl; // curl handler + vector bulk; + vector prepare; + + bool _es_objects_keep_only_current = true; + + uint32_t block_number; + fc::time_point_sec block_time; + + private: + template + void prepareTemplate(T blockchain_object, string index_name); +}; + +bool es_objects_plugin_impl::genesis() +{ + + ilog("elasticsearch OBJECTS: inserting data from genesis"); + + graphene::chain::database &db = _self.database(); + + block_number = db.head_block_num(); + block_time = db.head_block_time(); + + if (_es_objects_accounts) { + auto &index_accounts = db.get_index(1, 2); + index_accounts.inspect_all_objects([this, &db](const graphene::db::object &o) { + auto obj = db.find_object(o.id); + auto a = static_cast(obj); + prepareTemplate(*a, "account"); + }); + } + if (_es_objects_assets) { + auto &index_assets = db.get_index(1, 3); + index_assets.inspect_all_objects([this, &db](const graphene::db::object &o) { + auto obj = db.find_object(o.id); + auto a = static_cast(obj); + prepareTemplate(*a, "asset"); + }); + } + if (_es_objects_balances) { + auto &index_balances = db.get_index(2, 5); + index_balances.inspect_all_objects([this, &db](const graphene::db::object &o) { + auto obj = db.find_object(o.id); + auto b = static_cast(obj); + prepareTemplate(*b, "balance"); + }); + } + + graphene::utilities::ES es; + es.curl = curl; + es.bulk_lines = bulk; + es.elasticsearch_url = _es_objects_elasticsearch_url; + es.auth = _es_objects_auth; + if (!graphene::utilities::SendBulk(es)) + FC_THROW_EXCEPTION(fc::exception, "Error inserting genesis data."); + else + bulk.clear(); + + return true; +} + +bool es_objects_plugin_impl::index_database(const vector& ids, std::string action) +{ + graphene::chain::database &db = _self.database(); + + block_time = db.head_block_time(); + block_number = db.head_block_num(); + + if(block_number > _es_objects_start_es_after_block) { + + // check if we are in replay or in sync and change number of bulk documents accordingly + uint32_t limit_documents = 0; + if ((fc::time_point::now() - block_time) < fc::seconds(30)) + limit_documents = _es_objects_bulk_sync; + else + limit_documents = _es_objects_bulk_replay; + + + for (auto const &value: ids) { + if (value.is() && _es_objects_proposals) { + auto obj = db.find_object(value); + auto p = static_cast(obj); + if (p != nullptr) { + if (action == "delete") + remove_from_database(p->id, "proposal"); + else + prepareTemplate(*p, "proposal"); + } + } else if (value.is() && 
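/* account objects */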
_es_objects_accounts) { + auto obj = db.find_object(value); + auto a = static_cast(obj); + if (a != nullptr) { + if (action == "delete") + remove_from_database(a->id, "account"); + else + prepareTemplate(*a, "account"); + } + } else if (value.is() && _es_objects_assets) { + auto obj = db.find_object(value); + auto a = static_cast(obj); + if (a != nullptr) { + if (action == "delete") + remove_from_database(a->id, "asset"); + else + prepareTemplate(*a, "asset"); + } + } else if (value.is() && _es_objects_balances) { + auto obj = db.find_object(value); + auto b = static_cast(obj); + if (b != nullptr) { + if (action == "delete") + remove_from_database(b->id, "balance"); + else + prepareTemplate(*b, "balance"); + } + } else if (value.is() && _es_objects_limit_orders) { + auto obj = db.find_object(value); + auto l = static_cast(obj); + if (l != nullptr) { + if (action == "delete") + remove_from_database(l->id, "limitorder"); + else + prepareTemplate(*l, "limitorder"); + } + } else if (value.is() && _es_objects_asset_bitasset) { + auto obj = db.find_object(value); + auto ba = static_cast(obj); + if (ba != nullptr) { + if (action == "delete") + remove_from_database(ba->id, "bitasset"); + else + prepareTemplate(*ba, "bitasset"); + } + } + } + + if (curl && bulk.size() >= limit_documents) { // we are in bulk time, ready to add data to elasticsearech + + graphene::utilities::ES es; + es.curl = curl; + es.bulk_lines = bulk; + es.elasticsearch_url = _es_objects_elasticsearch_url; + es.auth = _es_objects_auth; + + if (!graphene::utilities::SendBulk(es)) + return false; + else + bulk.clear(); + } + } + + return true; +} + +void es_objects_plugin_impl::remove_from_database( object_id_type id, std::string index) +{ + if(_es_objects_keep_only_current) + { + fc::mutable_variant_object delete_line; + delete_line["_id"] = string(id); + delete_line["_index"] = _es_objects_index_prefix + index; + delete_line["_type"] = "data"; + fc::mutable_variant_object final_delete_line; + final_delete_line["delete"] = delete_line; + prepare.push_back(fc::json::to_string(final_delete_line)); + std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); + prepare.clear(); + } +} + +template +void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_name) +{ + fc::mutable_variant_object bulk_header; + bulk_header["_index"] = _es_objects_index_prefix + index_name; + bulk_header["_type"] = "data"; + if(_es_objects_keep_only_current) + { + bulk_header["_id"] = string(blockchain_object.id); + } + + adaptor_struct adaptor; + fc::variant blockchain_object_variant; + fc::to_variant( blockchain_object, blockchain_object_variant, GRAPHENE_NET_MAX_NESTED_OBJECTS ); + fc::mutable_variant_object o = adaptor.adapt(blockchain_object_variant.get_object()); + + o["object_id"] = string(blockchain_object.id); + o["block_time"] = block_time; + o["block_number"] = block_number; + + string data = fc::json::to_string(o); + + prepare = graphene::utilities::createBulk(bulk_header, std::move(data)); + std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); + prepare.clear(); +} + +es_objects_plugin_impl::~es_objects_plugin_impl() +{ + if (curl) { + curl_easy_cleanup(curl); + curl = nullptr; + } + return; +} + +} // end namespace detail + +es_objects_plugin::es_objects_plugin() : + my( new detail::es_objects_plugin_impl(*this) ) +{ +} + +es_objects_plugin::~es_objects_plugin() +{ +} + +std::string es_objects_plugin::plugin_name()const +{ + return "es_objects"; +} +std::string 
es_objects_plugin::plugin_description()const +{ + return "Stores blockchain objects in ES database. Experimental."; +} + +void es_objects_plugin::plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg + ) +{ + cli.add_options() + ("es-objects-elasticsearch-url", boost::program_options::value(), "Elasticsearch node url(http://localhost:9200/)") + ("es-objects-auth", boost::program_options::value(), "Basic auth username:password('')") + ("es-objects-bulk-replay", boost::program_options::value(), "Number of bulk documents to index on replay(10000)") + ("es-objects-bulk-sync", boost::program_options::value(), "Number of bulk documents to index on a synchronized chain(100)") + ("es-objects-proposals", boost::program_options::value(), "Store proposal objects(true)") + ("es-objects-accounts", boost::program_options::value(), "Store account objects(true)") + ("es-objects-assets", boost::program_options::value(), "Store asset objects(true)") + ("es-objects-balances", boost::program_options::value(), "Store balances objects(true)") + ("es-objects-limit-orders", boost::program_options::value(), "Store limit order objects(true)") + ("es-objects-asset-bitasset", boost::program_options::value(), "Store feed data(true)") + ("es-objects-index-prefix", boost::program_options::value(), "Add a prefix to the index(ppobjects-)") + ("es-objects-keep-only-current", boost::program_options::value(), "Keep only current state of the objects(true)") + ("es-objects-start-es-after-block", boost::program_options::value(), "Start doing ES job after block(0)") + ; + cfg.add(cli); +} + +void es_objects_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ + database().applied_block.connect([this](const signed_block &b) { + if(b.block_num() == 1) { + if (!my->genesis()) + FC_THROW_EXCEPTION(fc::exception, "Error populating genesis data."); + } + }); + + database().new_objects.connect([this]( const vector& ids, const flat_set& impacted_accounts ) { + if(!my->index_database(ids, "create")) + { + FC_THROW_EXCEPTION(fc::exception, "Error creating object from ES database, we are going to keep trying."); + } + }); + database().changed_objects.connect([this]( const vector& ids, const flat_set& impacted_accounts ) { + if(!my->index_database(ids, "update")) + { + FC_THROW_EXCEPTION(fc::exception, "Error updating object from ES database, we are going to keep trying."); + } + }); + database().removed_objects.connect([this](const vector& ids, const vector& objs, const flat_set& impacted_accounts) { + if(!my->index_database(ids, "delete")) + { + FC_THROW_EXCEPTION(fc::exception, "Error deleting object from ES database, we are going to keep trying."); + } + }); + + + if (options.count("es-objects-elasticsearch-url")) { + my->_es_objects_elasticsearch_url = options["es-objects-elasticsearch-url"].as(); + } + if (options.count("es-objects-auth")) { + my->_es_objects_auth = options["es-objects-auth"].as(); + } + if (options.count("es-objects-bulk-replay")) { + my->_es_objects_bulk_replay = options["es-objects-bulk-replay"].as(); + } + if (options.count("es-objects-bulk-sync")) { + my->_es_objects_bulk_sync = options["es-objects-bulk-sync"].as(); + } + if (options.count("es-objects-proposals")) { + my->_es_objects_proposals = options["es-objects-proposals"].as(); + } + if (options.count("es-objects-accounts")) { + my->_es_objects_accounts = options["es-objects-accounts"].as(); + } + if (options.count("es-objects-assets")) { + 
my->_es_objects_assets = options["es-objects-assets"].as(); + } + if (options.count("es-objects-balances")) { + my->_es_objects_balances = options["es-objects-balances"].as(); + } + if (options.count("es-objects-limit-orders")) { + my->_es_objects_limit_orders = options["es-objects-limit-orders"].as(); + } + if (options.count("es-objects-asset-bitasset")) { + my->_es_objects_asset_bitasset = options["es-objects-asset-bitasset"].as(); + } + if (options.count("es-objects-index-prefix")) { + my->_es_objects_index_prefix = options["es-objects-index-prefix"].as(); + } + if (options.count("es-objects-keep-only-current")) { + my->_es_objects_keep_only_current = options["es-objects-keep-only-current"].as(); + } + if (options.count("es-objects-start-es-after-block")) { + my->_es_objects_start_es_after_block = options["es-objects-start-es-after-block"].as(); + } +} + +void es_objects_plugin::plugin_startup() +{ + graphene::utilities::ES es; + es.curl = my->curl; + es.elasticsearch_url = my->_es_objects_elasticsearch_url; + es.auth = my->_es_objects_auth; + es.auth = my->_es_objects_index_prefix; + + if(!graphene::utilities::checkES(es)) + FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_es_objects_elasticsearch_url)); + ilog("elasticsearch OBJECTS: plugin_startup() begin"); +} + +} } diff --git a/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp b/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp new file mode 100644 index 000000000..fa91e3bde --- /dev/null +++ b/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#pragma once + +#include +#include + +namespace graphene { namespace es_objects { + +using namespace chain; + +namespace detail +{ + class es_objects_plugin_impl; +} + +class es_objects_plugin : public graphene::app::plugin +{ + public: + es_objects_plugin(); + virtual ~es_objects_plugin(); + + std::string plugin_name()const override; + std::string plugin_description()const override; + virtual void plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg) override; + virtual void plugin_initialize(const boost::program_options::variables_map& options) override; + virtual void plugin_startup() override; + + friend class detail::es_objects_plugin_impl; + std::unique_ptr my; +}; + +struct adaptor_struct { + fc::mutable_variant_object adapt(const variant_object &obj) { + fc::mutable_variant_object o(obj); + vector keys_to_rename; + for (auto i = o.begin(); i != o.end(); ++i) { + auto &element = (*i).value(); + if (element.is_object()) { + const string &name = (*i).key(); + auto &vo = element.get_object(); + if (vo.contains(name.c_str())) + keys_to_rename.emplace_back(name); + element = adapt(vo); + } else if (element.is_array()) + adapt(element.get_array()); + } + for (const auto &i : keys_to_rename) { + string new_name = i + "_"; + o[new_name] = variant(o[i]); + o.erase(i); + } + if (o.find("owner") != o.end() && o["owner"].is_string()) + { + o["owner_"] = o["owner"].as_string(); + o.erase("owner"); + } + if (o.find("active_special_authority") != o.end()) + { + o["active_special_authority"] = fc::json::to_string(o["active_special_authority"]); + } + if (o.find("owner_special_authority") != o.end()) + { + o["owner_special_authority"] = fc::json::to_string(o["owner_special_authority"]); + } + if (o.find("feeds") != o.end()) + { + o["feeds"] = fc::json::to_string(o["feeds"]); + } + if (o.find("operations") != o.end()) + { + o["operations"] = fc::json::to_string(o["operations"]); + } + + return o; + } + + void adapt(fc::variants &v) { + for (auto &array_element : v) { + if (array_element.is_object()) + array_element = adapt(array_element.get_object()); + else if (array_element.is_array()) + adapt(array_element.get_array()); + else + array_element = array_element.as_string(); + } + } +}; + +} } //graphene::es_objects diff --git a/libraries/plugins/snapshot/CMakeLists.txt b/libraries/plugins/snapshot/CMakeLists.txt index 227c38604..728740de5 100644 --- a/libraries/plugins/snapshot/CMakeLists.txt +++ b/libraries/plugins/snapshot/CMakeLists.txt @@ -15,3 +15,5 @@ install( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) + +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/snapshot" ) diff --git a/libraries/plugins/snapshot/include/graphene/snapshot/snapshot.hpp b/libraries/plugins/snapshot/include/graphene/snapshot/snapshot.hpp index b3ee30c6f..eb8d3a16c 100644 --- a/libraries/plugins/snapshot/include/graphene/snapshot/snapshot.hpp +++ b/libraries/plugins/snapshot/include/graphene/snapshot/snapshot.hpp @@ -35,6 +35,7 @@ class snapshot_plugin : public graphene::app::plugin { ~snapshot_plugin() {} std::string plugin_name()const override; + std::string plugin_description()const override; virtual void plugin_set_program_options( boost::program_options::options_description &command_line_options, diff --git a/libraries/plugins/snapshot/snapshot.cpp b/libraries/plugins/snapshot/snapshot.cpp index fe856ecb1..f74ad5894 100644 --- a/libraries/plugins/snapshot/snapshot.cpp +++ b/libraries/plugins/snapshot/snapshot.cpp 
@@ -54,6 +54,11 @@ std::string snapshot_plugin::plugin_name()const return "snapshot"; } +std::string snapshot_plugin::plugin_description()const +{ + return "Create snapshots at a specified time or block number."; +} + void snapshot_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { ilog("snapshot plugin: plugin_initialize() begin"); diff --git a/libraries/utilities/CMakeLists.txt b/libraries/utilities/CMakeLists.txt index f2d646d56..98086b105 100644 --- a/libraries/utilities/CMakeLists.txt +++ b/libraries/utilities/CMakeLists.txt @@ -14,6 +14,7 @@ set(sources string_escape.cpp tempdir.cpp words.cpp + elasticsearch.cpp ${HEADERS}) configure_file("${CMAKE_CURRENT_SOURCE_DIR}/git_revision.cpp.in" "${CMAKE_CURRENT_BINARY_DIR}/git_revision.cpp" @ONLY) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp new file mode 100644 index 000000000..11a9561bf --- /dev/null +++ b/libraries/utilities/elasticsearch.cpp @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include + +#include +#include +#include +#include + +size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp) +{ + ((std::string*)userp)->append((char*)contents, size * nmemb); + return size * nmemb; +} + +namespace graphene { namespace utilities { + +bool checkES(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + "_nodes"; + curl_request.auth = es.auth; + curl_request.type = "GET"; + + if(doCurl(curl_request).empty()) + return false; + return true; + +} +const std::string simpleQuery(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.endpoint; + curl_request.auth = es.auth; + curl_request.type = "POST"; + curl_request.query = es.query; + + return doCurl(curl_request); +} + +bool SendBulk(ES& es) +{ + std::string bulking = joinBulkLines(es.bulk_lines); + + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + "_bulk"; + curl_request.auth = es.auth; + curl_request.type = "POST"; + curl_request.query = bulking; + + auto curlResponse = doCurl(curl_request); + + if(handleBulkResponse(getResponseCode(curl_request.handler), curlResponse)) + return true; + return false; +} + +const std::string joinBulkLines(const std::vector& bulk) +{ + auto bulking = boost::algorithm::join(bulk, "\n"); + bulking = bulking + "\n"; + + return bulking; +} +long getResponseCode(CURL *handler) +{ + long http_code = 0; + curl_easy_getinfo (handler, CURLINFO_RESPONSE_CODE, &http_code); + return http_code; +} + +bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer) +{ + if(http_code == 200) { + // all good, but check errors in response + fc::variant j = fc::json::from_string(CurlReadBuffer); + bool errors = j["errors"].as_bool(); + if(errors == true) { + return false; + } + } + else { + if(http_code == 413) { + elog( "413 error: Can be low disk space" ); + } + else if(http_code == 401) { + elog( "401 error: Unauthorized" ); + } + else { + elog( std::to_string(http_code) + " error: Unknown error" ); + } + return false; + } + return true; +} + +const std::vector createBulk(const fc::mutable_variant_object& bulk_header, const std::string& data) +{ + std::vector bulk; + fc::mutable_variant_object final_bulk_header; + final_bulk_header["index"] = bulk_header; + bulk.push_back(fc::json::to_string(final_bulk_header)); + bulk.push_back(data); + + return bulk; +} + +bool deleteAll(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.index_prefix + "*"; + curl_request.auth = es.auth; + curl_request.type = "DELETE"; + + auto curl_response = doCurl(curl_request); + if(curl_response.empty()) + return false; + else + return true; +} +const std::string getEndPoint(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.endpoint; + curl_request.auth = es.auth; + curl_request.type = "GET"; + + return doCurl(curl_request); +} + +const std::string generateIndexName(const fc::time_point_sec& block_date, const std::string& _elasticsearch_index_prefix) +{ + auto block_date_string = block_date.to_iso_string(); + std::vector parts; + boost::split(parts, block_date_string, boost::is_any_of("-")); + std::string index_name = _elasticsearch_index_prefix + parts[0] + "-" + parts[1]; + 
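+   /* illustrative mapping, one index per month: with prefix "peerplays-",
+    *   a block dated 2020-01-13T06:03:15 is indexed into "peerplays-2020-01",
+    *   a block dated 2020-02-29T12:00:00 into "peerplays-2020-02".
+    */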
return index_name; +} + +const std::string doCurl(CurlRequest& curl) +{ + std::string CurlReadBuffer; + struct curl_slist *headers = NULL; + headers = curl_slist_append(headers, "Content-Type: application/json"); + + curl_easy_setopt(curl.handler, CURLOPT_HTTPHEADER, headers); + curl_easy_setopt(curl.handler, CURLOPT_URL, curl.url.c_str()); + curl_easy_setopt(curl.handler, CURLOPT_CUSTOMREQUEST, curl.type.c_str()); + if(curl.type == "POST") + { + curl_easy_setopt(curl.handler, CURLOPT_POST, true); + curl_easy_setopt(curl.handler, CURLOPT_POSTFIELDS, curl.query.c_str()); + } + curl_easy_setopt(curl.handler, CURLOPT_WRITEFUNCTION, WriteCallback); + curl_easy_setopt(curl.handler, CURLOPT_WRITEDATA, (void *)&CurlReadBuffer); + curl_easy_setopt(curl.handler, CURLOPT_USERAGENT, "libcrp/0.1"); + if(!curl.auth.empty()) + curl_easy_setopt(curl.handler, CURLOPT_USERPWD, curl.auth.c_str()); + curl_easy_perform(curl.handler); + + return CurlReadBuffer; +} + +} } // end namespace graphene::utilities diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp new file mode 100644 index 000000000..898464b16 --- /dev/null +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#pragma once +#include +#include +#include + +#include +#include +#include + +size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp); + +namespace graphene { namespace utilities { + + class ES { + public: + CURL *curl; + std::vector bulk_lines; + std::string elasticsearch_url; + std::string index_prefix; + std::string auth; + std::string endpoint; + std::string query; + }; + class CurlRequest { + public: + CURL *handler; + std::string url; + std::string type; + std::string auth; + std::string query; + }; + + bool SendBulk(ES& es); + const std::vector createBulk(const fc::mutable_variant_object& bulk_header, const std::string& data); + bool checkES(ES& es); + const std::string simpleQuery(ES& es); + bool deleteAll(ES& es); + bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer); + const std::string getEndPoint(ES& es); + const std::string generateIndexName(const fc::time_point_sec& block_date, const std::string& _elasticsearch_index_prefix); + const std::string doCurl(CurlRequest& curl); + const std::string joinBulkLines(const std::vector& bulk); + long getResponseCode(CURL *handler); + +} } // end namespace graphene::utilities diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 2a8bf595a..0bbf305a2 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -1100,7 +1100,7 @@ class wallet_api_impl if( wallet_filename == "" ) wallet_filename = _wallet_filename; - wlog( "saving wallet to file ${fn}", ("fn", wallet_filename) ); + ilog( "saving wallet to file ${fn}", ("fn", wallet_filename) ); string data = fc::json::to_pretty_string( _wallet ); try @@ -1112,14 +1112,38 @@ class wallet_api_impl // // http://en.wikipedia.org/wiki/Most_vexing_parse // - fc::ofstream outfile{ fc::path( wallet_filename ) }; + std::string tmp_wallet_filename = wallet_filename + ".tmp"; + fc::ofstream outfile{ fc::path( tmp_wallet_filename ) }; outfile.write( data.c_str(), data.length() ); outfile.flush(); outfile.close(); + + ilog( "saved successfully wallet to tmp file ${fn}", ("fn", tmp_wallet_filename) ); + + std::string wallet_file_content; + fc::read_file_contents(tmp_wallet_filename, wallet_file_content); + + if (wallet_file_content == data) { + dlog( "validated successfully tmp wallet file ${fn}", ("fn", tmp_wallet_filename) ); + fc::rename( tmp_wallet_filename, wallet_filename ); + dlog( "renamed successfully tmp wallet file ${fn}", ("fn", tmp_wallet_filename) ); + } + else + { + FC_THROW("tmp wallet file cannot be validated ${fn}", ("fn", tmp_wallet_filename) ); + } + + ilog( "successfully saved wallet to file ${fn}", ("fn", wallet_filename) ); + disable_umask_protection(); } catch(...) 
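/* on failure, log the wallet JSON with ws_password blanked so the secret does not reach the log, then restore it and rethrow */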
{ + string ws_password = _wallet.ws_password; + _wallet.ws_password = ""; + elog("wallet file content is: ${data}", ("data", fc::json::to_pretty_string( _wallet ) ) ); + _wallet.ws_password = ws_password; + disable_umask_protection(); throw; } @@ -2122,7 +2146,7 @@ class wallet_api_impl asset_object asset_obj = get_asset( asset_symbol ); vector< vesting_balance_object > vbos; - vesting_balance_object vbo; + vesting_balance_object vbo; fc::optional vbid = maybe_id(account_name); if( !vbid ) { diff --git a/programs/cli_wallet/main.cpp b/programs/cli_wallet/main.cpp index 9cb8750dd..b7abdabef 100644 --- a/programs/cli_wallet/main.cpp +++ b/programs/cli_wallet/main.cpp @@ -113,11 +113,13 @@ int main( int argc, char** argv ) cfg.appenders.push_back(fc::appender_config( "rpc", "file", fc::variant(ac, 5))); cfg.loggers = { fc::logger_config("default"), fc::logger_config( "rpc") }; - cfg.loggers.front().level = fc::log_level::info; + cfg.loggers.front().level = fc::log_level::warn; cfg.loggers.front().appenders = {"default"}; - cfg.loggers.back().level = fc::log_level::debug; + cfg.loggers.back().level = fc::log_level::info; cfg.loggers.back().appenders = {"rpc"}; + fc::configure_logging( cfg ); + fc::ecc::private_key committee_private_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key"))); idump( (key_to_wif( committee_private_key ) ) ); diff --git a/programs/witness_node/CMakeLists.txt b/programs/witness_node/CMakeLists.txt index 3d03253b3..c83fc3635 100644 --- a/programs/witness_node/CMakeLists.txt +++ b/programs/witness_node/CMakeLists.txt @@ -11,7 +11,7 @@ endif() # We have to link against graphene_debug_witness because deficiency in our API infrastructure doesn't allow plugins to be fully abstracted #246 target_link_libraries( witness_node - PRIVATE graphene_app graphene_account_history graphene_affiliate_stats graphene_market_history graphene_witness graphene_chain graphene_debug_witness graphene_bookie graphene_egenesis_full fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE graphene_app graphene_account_history graphene_affiliate_stats graphene_elasticsearch graphene_market_history graphene_witness graphene_chain graphene_debug_witness graphene_bookie graphene_egenesis_full graphene_snapshot graphene_es_objects fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) # also add dependencies to graphene_generate_genesis graphene_generate_uia_sharedrop_genesis if you want those plugins install( TARGETS diff --git a/programs/witness_node/genesis.json b/programs/witness_node/genesis.json index ab153e7bd..e07cec92b 100644 --- a/programs/witness_node/genesis.json +++ b/programs/witness_node/genesis.json @@ -1,5 +1,5 @@ { - "initial_timestamp": "2019-05-14T18:47:51", + "initial_timestamp": "2020-01-13T06:03:15", "max_core_supply": "1000000000000000", "initial_parameters": { "current_fees": { @@ -307,6 +307,27 @@ 75,{} ],[ 76,{} + ],[ + 77,{ + "lottery_asset": 2000000, + "price_per_kbyte": 10 + } + ],[ + 78,{ + "fee": 0 + } + ],[ + 79,{ + "fee": 0 + } + ],[ + 80,{ + "fee": 0 + } + ],[ + 81,{ + "fee": 2000000 + } ] ], "scale": 10000 @@ -352,7 +373,19 @@ "maximum_tournament_start_time_in_future": 2419200, "maximum_tournament_start_delay": 604800, "maximum_tournament_number_of_wins": 100, - "extensions": {} + "extensions": { + "min_bet_multiplier": 10100, + "max_bet_multiplier": 10000000, + "betting_rake_fee_percentage": 300, + "live_betting_delay_time": 5, + "sweeps_distribution_percentage": 200, + "sweeps_distribution_asset": "1.3.0", + "sweeps_vesting_accumulator_account": 
"1.2.0", + "gpos_period": 15552000, + "gpos_subperiod": 2592000, + "gpos_period_start": 1601528400, + "gpos_vesting_lockin_period": 2592000 + } }, "initial_bts_accounts": [], "initial_accounts": [{ diff --git a/programs/witness_node/main.cpp b/programs/witness_node/main.cpp index 8c613067a..4d49d96f3 100644 --- a/programs/witness_node/main.cpp +++ b/programs/witness_node/main.cpp @@ -25,15 +25,18 @@ #include #include +#include #include #include +#include +#include #include //#include //#include #include #include #include -//#include +#include #include #include @@ -71,14 +74,17 @@ int main(int argc, char** argv) { bpo::variables_map options; auto witness_plug = node->register_plugin(); + auto debug_witness_plug = node->register_plugin(); auto history_plug = node->register_plugin(); + auto elasticsearch_plug = node->register_plugin(); + auto es_objects_plug = node->register_plugin(); auto market_history_plug = node->register_plugin(); //auto generate_genesis_plug = node->register_plugin(); //auto generate_uia_sharedrop_genesis_plug = node->register_plugin(); auto list_plug = node->register_plugin(); auto affiliate_stats_plug = node->register_plugin(); auto bookie_plug = node->register_plugin(); -// auto snapshot_plug = node->register_plugin(); + auto snapshot_plug = node->register_plugin(); try { @@ -142,7 +148,7 @@ int main(int argc, char** argv) { exit_promise->set_value(signal); }, SIGTERM); - ilog("Started witness node on a chain with ${h} blocks.", ("h", node->chain_database()->head_block_num())); + ilog("Started Peerplays node on a chain with ${h} blocks.", ("h", node->chain_database()->head_block_num())); ilog("Chain ID is ${id}", ("id", node->chain_database()->get_chain_id()) ); int signal = exit_promise->wait(); @@ -163,4 +169,4 @@ int main(int argc, char** argv) { delete node; return 1; } -} \ No newline at end of file +} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 44af778be..e57e3374a 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -8,38 +8,38 @@ endif() file(GLOB UNIT_TESTS "tests/*.cpp") add_executable( chain_test ${UNIT_TESTS} ${COMMON_SOURCES} ) -target_link_libraries( chain_test graphene_chain graphene_app graphene_account_history graphene_bookie graphene_egenesis_none fc graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( chain_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_bookie graphene_egenesis_none fc graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) if(MSVC) set_source_files_properties( tests/serialization_tests.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) file(GLOB PERFORMANCE_TESTS "performance/*.cpp") add_executable( performance_test ${PERFORMANCE_TESTS} ${COMMON_SOURCES} ) -target_link_libraries( performance_test graphene_chain graphene_app graphene_account_history graphene_bookie graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( performance_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_bookie graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB BENCH_MARKS "benchmarks/*.cpp") add_executable( chain_bench ${BENCH_MARKS} ${COMMON_SOURCES} ) -target_link_libraries( chain_bench graphene_chain graphene_app graphene_account_history graphene_bookie graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( chain_bench graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_bookie 
graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB APP_SOURCES "app/*.cpp") add_executable( app_test ${APP_SOURCES} ) -target_link_libraries( app_test graphene_app graphene_account_history graphene_bookie graphene_net graphene_chain graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( app_test graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_witness graphene_bookie graphene_net graphene_chain graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB INTENSE_SOURCES "intense/*.cpp") add_executable( intense_test ${INTENSE_SOURCES} ${COMMON_SOURCES} ) -target_link_libraries( intense_test graphene_chain graphene_app graphene_account_history graphene_bookie graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( intense_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_bookie graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB BETTING_TESTS "betting/*.cpp") add_executable( betting_test ${BETTING_TESTS} ${COMMON_SOURCES} ) -target_link_libraries( betting_test graphene_chain graphene_app graphene_account_history graphene_bookie graphene_egenesis_none fc graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( betting_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_bookie graphene_egenesis_none fc graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB TOURNAMENT_TESTS "tournament/*.cpp") add_executable( tournament_test ${TOURNAMENT_TESTS} ${COMMON_SOURCES} ) -target_link_libraries( tournament_test graphene_chain graphene_app graphene_account_history graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( tournament_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB RANDOM_SOURCES "random/*.cpp") add_executable( random_test ${RANDOM_SOURCES} ${COMMON_SOURCES} ) -target_link_libraries( random_test graphene_chain graphene_app graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( random_test graphene_chain graphene_app graphene_elasticsearch graphene_es_objects graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB CLI_SOURCES "cli/*.cpp") add_executable( cli_test ${CLI_SOURCES} ) @@ -51,4 +51,8 @@ if(MSVC) set_source_files_properties( cli/main.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) +file(GLOB ES_SOURCES "elasticsearch/*.cpp") +add_executable( es_test ${ES_SOURCES} ${COMMON_SOURCES} ) +target_link_libraries( es_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) + add_subdirectory( generate_empty_blocks ) diff --git a/tests/app/main.cpp b/tests/app/main.cpp index 98f19c197..623f760e0 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -28,8 +28,12 @@ #include +#include #include - +#include +#include +#include +#include #include #include @@ -56,7 +60,13 @@ BOOST_AUTO_TEST_CASE( two_node_network ) BOOST_TEST_MESSAGE( "Creating and initializing app1" ); graphene::app::application app1; + app1.register_plugin(); app1.register_plugin(); + app1.register_plugin(); + app1.register_plugin(); + app1.register_plugin(); + app1.register_plugin(); + boost::program_options::variables_map cfg; cfg.emplace("p2p-endpoint", boost::program_options::variable_value(string("127.0.0.1:0"), 
false)); app1.initialize(app_dir.path(), cfg); @@ -71,7 +81,12 @@ BOOST_AUTO_TEST_CASE( two_node_network ) auto cfg2 = cfg; graphene::app::application app2; - app2.register_plugin(); + app2.register_plugin(); + app2.register_plugin(); + app2.register_plugin(); + app2.register_plugin(); + app2.register_plugin(); + app2.register_plugin(); cfg2.erase("p2p-endpoint"); cfg2.emplace("p2p-endpoint", boost::program_options::variable_value(string("127.0.0.1:0"), false)); cfg2.emplace("seed-node", boost::program_options::variable_value(vector{endpoint1}, false)); diff --git a/tests/betting/betting_tests.cpp b/tests/betting/betting_tests.cpp index fccdb2538..540a33a33 100644 --- a/tests/betting/betting_tests.cpp +++ b/tests/betting/betting_tests.cpp @@ -3012,7 +3012,7 @@ boost::unit_test::test_suite* init_unit_test_suite(int argc, char* argv[]) { std::cout << "Random number generator seeded to " << time(NULL) << std::endl; // betting operations don't take effect until HARDFORK 1000 - GRAPHENE_TESTING_GENESIS_TIMESTAMP = HARDFORK_1000_TIME.sec_since_epoch() + 10; + GRAPHENE_TESTING_GENESIS_TIMESTAMP = HARDFORK_1000_TIME.sec_since_epoch() + 15; return nullptr; } diff --git a/tests/cli/main.cpp b/tests/cli/main.cpp index cfde25d60..9e7a41193 100644 --- a/tests/cli/main.cpp +++ b/tests/cli/main.cpp @@ -30,6 +30,8 @@ #include #include #include +#include +#include #include #include @@ -125,9 +127,11 @@ std::shared_ptr start_application(fc::temp_directory std::shared_ptr app1(new graphene::app::application{}); app1->register_plugin< graphene::bookie::bookie_plugin>(); - app1->register_plugin(); + app1->register_plugin< graphene::account_history::account_history_plugin>(); app1->register_plugin< graphene::market_history::market_history_plugin >(); app1->register_plugin< graphene::witness_plugin::witness_plugin >(); + app1->register_plugin< graphene::accounts_list::accounts_list_plugin >(); + app1->register_plugin< graphene::affiliate_stats::affiliate_stats_plugin >(); app1->startup_plugins(); boost::program_options::variables_map cfg; #ifdef _WIN32 diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 7a94fe103..4eb619962 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -29,11 +29,9 @@ #include #include #include +#include +#include -#include - -#include -#include #include #include #include @@ -51,16 +49,16 @@ #include #include -#include #include -#include #include "database_fixture.hpp" using namespace graphene::chain::test; +//redefining parameters here to as per updated TESTNET parameters to verify unit test cases uint32_t GRAPHENE_TESTING_GENESIS_TIMESTAMP = 1431700002; + namespace graphene { namespace chain { using std::cout; @@ -81,7 +79,7 @@ database_fixture::database_fixture() std::cout << "running test " << boost::unit_test::framework::current_test_case().p_name << std::endl; } - auto ahplugin = app.register_plugin(); + //auto ahplugin = app.register_plugin(); auto mhplugin = app.register_plugin(); auto bookieplugin = app.register_plugin(); auto affiliateplugin = app.register_plugin(); @@ -134,8 +132,51 @@ database_fixture::database_fixture() } // app.initialize(); - ahplugin->plugin_set_app(&app); - ahplugin->plugin_initialize(options); + + auto test_name = boost::unit_test::framework::current_test_case().p_name.value; + if(test_name == "elasticsearch_account_history" || test_name == "elasticsearch_suite" || + test_name == "elasticsearch_history_api") { + auto esplugin = app.register_plugin(); + 
esplugin->plugin_set_app(&app); + + options.insert(std::make_pair("elasticsearch-node-url", boost::program_options::variable_value(string("http://localhost:9200/"), false))); + options.insert(std::make_pair("elasticsearch-bulk-replay", boost::program_options::variable_value(uint32_t(2), false))); + options.insert(std::make_pair("elasticsearch-bulk-sync", boost::program_options::variable_value(uint32_t(2), false))); + options.insert(std::make_pair("elasticsearch-start-es-after-block", boost::program_options::variable_value(uint32_t(0), false))); + options.insert(std::make_pair("elasticsearch-visitor", boost::program_options::variable_value(false, false))); + options.insert(std::make_pair("elasticsearch-operation-object", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("elasticsearch-operation-string", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("elasticsearch-mode", boost::program_options::variable_value(uint16_t(2), false))); + + esplugin->plugin_initialize(options); + esplugin->plugin_startup(); + } + else { + auto ahplugin = app.register_plugin(); + app.enable_plugin("affiliate_stats"); + ahplugin->plugin_set_app(&app); + ahplugin->plugin_initialize(options); + ahplugin->plugin_startup(); + } + + if(test_name == "elasticsearch_objects" || test_name == "elasticsearch_suite") { + auto esobjects_plugin = app.register_plugin(); + esobjects_plugin->plugin_set_app(&app); + + options.insert(std::make_pair("es-objects-elasticsearch-url", boost::program_options::variable_value(string("http://localhost:9200/"), false))); + options.insert(std::make_pair("es-objects-bulk-replay", boost::program_options::variable_value(uint32_t(2), false))); + options.insert(std::make_pair("es-objects-bulk-sync", boost::program_options::variable_value(uint32_t(2), false))); + options.insert(std::make_pair("es-objects-proposals", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-accounts", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-assets", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-balances", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-limit-orders", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-asset-bitasset", boost::program_options::variable_value(true, false))); + + esobjects_plugin->plugin_initialize(options); + esobjects_plugin->plugin_startup(); + } + mhplugin->plugin_set_app(&app); mhplugin->plugin_initialize(options); bookieplugin->plugin_set_app(&app); @@ -143,7 +184,6 @@ database_fixture::database_fixture() affiliateplugin->plugin_set_app(&app); affiliateplugin->plugin_initialize(options); - ahplugin->plugin_startup(); mhplugin->plugin_startup(); bookieplugin->plugin_startup(); affiliateplugin->plugin_startup(); diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp new file mode 100644 index 000000000..28d3522ca --- /dev/null +++ b/tests/elasticsearch/main.cpp @@ -0,0 +1,535 @@ +/* + * Copyright (c) 2018 oxarbitrage and contributors. 
+ * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include +#include + +#include +#include + +#include "../common/database_fixture.hpp" + +#define BOOST_TEST_MODULE Elastic Search Database Tests +#include + +using namespace graphene::chain; +using namespace graphene::chain::test; +using namespace graphene::app; + +BOOST_FIXTURE_TEST_SUITE( elasticsearch_tests, database_fixture ) + +BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { + try { + + CURL *curl; // curl handler + curl = curl_easy_init(); + + graphene::utilities::ES es; + es.curl = curl; + es.elasticsearch_url = "http://localhost:9200/"; + es.index_prefix = "peerplays-"; + //es.auth = "elastic:changeme"; + + // delete all first + auto delete_account_history = graphene::utilities::deleteAll(es); + fc::usleep(fc::milliseconds(1000)); // this is because index.refresh_interval, nothing to worry + + if(delete_account_history) { // all records deleted + + //account_id_type() do 3 ops + create_bitasset("USD", account_id_type()); + auto dan = create_account("dan"); + auto bob = create_account("bob"); + + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + // for later use + //int asset_crobjeate_op_id = operation::tag::value; + //int account_create_op_id = operation::tag::value; + + string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; + es.endpoint = es.index_prefix + "*/data/_count"; + es.query = query; + + auto res = graphene::utilities::simpleQuery(es); + variant j = fc::json::from_string(res); + auto total = j["count"].as_string(); + BOOST_CHECK_EQUAL(total, "5"); + + es.endpoint = es.index_prefix + "*/data/_search"; + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + auto first_id = j["hits"]["hits"][size_t(0)]["_id"].as_string(); + BOOST_CHECK_EQUAL(first_id, "2.9.0"); + + generate_block(); + auto willie = create_account("willie"); + generate_block(); + + fc::usleep(fc::milliseconds(1000)); // index.refresh_interval + + es.endpoint = es.index_prefix + "*/data/_count"; + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + + total = j["count"].as_string(); + BOOST_CHECK_EQUAL(total, "7"); + + // do some transfers in 1 block + transfer(account_id_type()(db), bob, asset(100)); + transfer(account_id_type()(db), bob, asset(200)); + transfer(account_id_type()(db), bob, asset(300)); + + generate_block(); + fc::usleep(fc::milliseconds(1000)); // index.refresh_interval + + res = 
graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + + total = j["count"].as_string(); + BOOST_CHECK_EQUAL(total, "13"); + + // check the visitor data + auto block_date = db.head_block_time(); + std::string index_name = graphene::utilities::generateIndexName(block_date, "peerplays-"); + + es.endpoint = index_name + "/data/2.9.12"; // we know last op is a transfer of amount 300 + res = graphene::utilities::getEndPoint(es); + j = fc::json::from_string(res); + auto last_transfer_amount = j["_source"]["operation_history"]["op_object"]["amount_"]["amount"].as_string(); + BOOST_CHECK_EQUAL(last_transfer_amount, "300"); + } + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(elasticsearch_objects) { + try { + + CURL *curl; // curl handler + curl = curl_easy_init(); + + graphene::utilities::ES es; + es.curl = curl; + es.elasticsearch_url = "http://localhost:9200/"; + es.index_prefix = "ppobjects-"; + //es.auth = "elastic:changeme"; + + // delete all first + auto delete_objects = graphene::utilities::deleteAll(es); + + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + if(delete_objects) { // all records deleted + + // asset and bitasset + create_bitasset("USD", account_id_type()); + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; + es.endpoint = es.index_prefix + "*/data/_count"; + es.query = query; + + auto res = graphene::utilities::simpleQuery(es); + variant j = fc::json::from_string(res); + auto total = j["count"].as_string(); + BOOST_CHECK_EQUAL(total, "2"); + + es.endpoint = es.index_prefix + "asset/data/_search"; + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + auto first_id = j["hits"]["hits"][size_t(0)]["_source"]["symbol"].as_string(); + BOOST_CHECK_EQUAL(first_id, "USD"); + + auto bitasset_data_id = j["hits"]["hits"][size_t(0)]["_source"]["bitasset_data_id"].as_string(); + es.endpoint = es.index_prefix + "bitasset/data/_search"; + es.query = "{ \"query\" : { \"bool\": { \"must\" : [{ \"term\": { \"object_id\": \""+bitasset_data_id+"\"}}] } } }"; + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + auto bitasset_object_id = j["hits"]["hits"][size_t(0)]["_source"]["object_id"].as_string(); + BOOST_CHECK_EQUAL(bitasset_object_id, bitasset_data_id); + } + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(elasticsearch_suite) { + try { + + CURL *curl; // curl handler + curl = curl_easy_init(); + + graphene::utilities::ES es; + es.curl = curl; + es.elasticsearch_url = "http://localhost:9200/"; + es.index_prefix = "peerplays-"; + auto delete_account_history = graphene::utilities::deleteAll(es); + fc::usleep(fc::milliseconds(1000)); + es.index_prefix = "ppobjects-"; + auto delete_objects = graphene::utilities::deleteAll(es); + fc::usleep(fc::milliseconds(1000)); + + if(delete_account_history && delete_objects) { // all records deleted + + + } + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(elasticsearch_history_api) { + try { + CURL *curl; // curl handler + curl = curl_easy_init(); + + graphene::utilities::ES es; + es.curl = curl; + es.elasticsearch_url = "http://localhost:9200/"; + es.index_prefix = "peerplays-"; + + auto delete_account_history = graphene::utilities::deleteAll(es); + + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + 
if(delete_account_history) { + + create_bitasset("USD", account_id_type()); // create op 0 + const account_object& dan = create_account("dan"); // create op 1 + create_bitasset("CNY", dan.id); // create op 2 + create_bitasset("BTC", account_id_type()); // create op 3 + create_bitasset("XMR", dan.id); // create op 4 + create_bitasset("EUR", account_id_type()); // create op 5 + create_bitasset("OIL", dan.id); // create op 6 + + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + graphene::app::history_api hist_api(app); + app.enable_plugin("elasticsearch"); + + // f(A, 0, 4, 9) = { 5, 3, 1, 0 } + auto histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(9)); + + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // f(A, 0, 4, 6) = { 5, 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // f(A, 0, 4, 5) = { 5, 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // f(A, 0, 4, 4) = { 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + + // f(A, 0, 4, 3) = { 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + + // f(A, 0, 4, 2) = { 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // f(A, 0, 4, 1) = { 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // f(A, 0, 4, 0) = { 5, 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // f(A, 1, 5, 9) = { 5, 3 } + histories = 
hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // f(A, 1, 5, 6) = { 5, 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // f(A, 1, 5, 5) = { 5, 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // f(A, 1, 5, 4) = { 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + + // f(A, 1, 5, 3) = { 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + + // f(A, 1, 5, 2) = { } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(A, 1, 5, 1) = { } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(A, 1, 5, 0) = { 5, 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // f(A, 0, 3, 9) = { 5, 3, 1 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(A, 0, 3, 6) = { 5, 3, 1 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(A, 0, 3, 5) = { 5, 3, 1 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(A, 0, 3, 4) = { 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + + // f(A, 0, 3, 3) = { 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(3)); 
+ BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + + // f(A, 0, 3, 2) = { 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // f(A, 0, 3, 1) = { 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // f(A, 0, 3, 0) = { 5, 3, 1 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(B, 0, 4, 9) = { 6, 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + // f(B, 0, 4, 6) = { 6, 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + // f(B, 0, 4, 5) = { 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(B, 0, 4, 4) = { 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(B, 0, 4, 3) = { 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + + // f(B, 0, 4, 2) = { 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + + // f(B, 0, 4, 1) = { 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + + // f(B, 0, 4, 0) = { 6, 4, 2, 1 } + histories = 
hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + // f(B, 2, 4, 9) = { 6, 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + + // f(B, 2, 4, 6) = { 6, 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + + // f(B, 2, 4, 5) = { 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + + // f(B, 2, 4, 4) = { 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + + // f(B, 2, 4, 3) = { } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(B, 2, 4, 2) = { } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(B, 2, 4, 1) = { } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(B, 2, 4, 0) = { 6, 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + + // 0 limits + histories = hist_api.get_account_history("dan", operation_history_id_type(0), 0, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(3), 0, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // non existent account + histories = hist_api.get_account_history("1.2.18", operation_history_id_type(0), 4, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // create a new account C = alice { 7 } + auto alice = create_account("alice"); + + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + // f(C, 0, 4, 10) = { 7 } + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 4, operation_history_id_type(10)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 7u); + + // f(C, 8, 4, 10) = { } + histories = hist_api.get_account_history("alice", operation_history_id_type(8), 4, operation_history_id_type(10)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(A, 0, 10, 0) = { 7, 5, 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); + 
BOOST_CHECK_EQUAL(histories.size(), 5u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 7u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[4].id.instance(), 0u); + } + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/block_tests.cpp b/tests/tests/block_tests.cpp index 9f74a34c3..b7ed69fe7 100644 --- a/tests/tests/block_tests.cpp +++ b/tests/tests/block_tests.cpp @@ -745,6 +745,8 @@ BOOST_FIXTURE_TEST_CASE( maintenance_interval, database_fixture ) PUSH_TX( db, trx, ~0 ); trx.operations.clear(); } + + generate_block(); transfer(account_id_type()(db), nathan, asset(5000)); generate_blocks(maintenence_time - initial_properties.parameters.block_interval); @@ -959,18 +961,23 @@ BOOST_FIXTURE_TEST_CASE( pop_block_twice, database_fixture ) processed_transaction ptx; account_object committee_account_object = committee_account(db); + generate_block(skip_flags); // transfer from committee account to Sam account transfer(committee_account_object, sam_account_object, core.amount(100000)); generate_block(skip_flags); - create_account("alice"); + private_key_type charlie_key = generate_private_key("charlie"); + create_account("charlie", charlie_key); generate_block(skip_flags); - create_account("bob"); generate_block(skip_flags); - + private_key_type bob_key = generate_private_key("bob"); + create_account("bob", bob_key); + generate_block(skip_flags); + db.pop_block(); db.pop_block(); + } catch(const fc::exception& e) { edump( (e.to_detail_string()) ); throw; diff --git a/tests/tests/gpos_tests.cpp b/tests/tests/gpos_tests.cpp index aa9969ee2..646a1782b 100644 --- a/tests/tests/gpos_tests.cpp +++ b/tests/tests/gpos_tests.cpp @@ -905,7 +905,7 @@ BOOST_AUTO_TEST_CASE( worker_dividends_voting ) vote_for(voter1_id, worker.vote_for, voter1_private_key); // first maint pass, coefficient will be 1 - generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_blocks(HARDFORK_GPOS_TIME + fc::hours(12)); worker = worker_id_type()(db); BOOST_CHECK_EQUAL(worker.total_votes_for, 100); @@ -925,8 +925,8 @@ BOOST_AUTO_TEST_CASE( worker_dividends_voting ) generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); // worker is getting paid - BOOST_CHECK_EQUAL(worker_id_type()(db).worker.get().balance(db).balance.amount.value, 10); - BOOST_CHECK_EQUAL(worker.worker.get().balance(db).balance.amount.value, 10); + BOOST_CHECK_EQUAL(worker_id_type()(db).worker.get().balance(db).balance.amount.value, 5); + BOOST_CHECK_EQUAL(worker.worker.get().balance(db).balance.amount.value, 5); // second maint pass, coefficient will be 0.75 worker = worker_id_type()(db); @@ -1009,8 +1009,8 @@ BOOST_AUTO_TEST_CASE( account_multiple_vesting ) vote_for(sam_id, witness1.vote_id, sam_private_key); vote_for(patty_id, witness1.vote_id, patty_private_key); - generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); - + generate_blocks(HARDFORK_GPOS_TIME + fc::hours(12)); //forward 1/2 sub-period so that it consider only gpos votes + // amount in vested balanced will sum up as voting power witness1 = witness_id_type(1)(db); BOOST_CHECK_EQUAL(witness1.total_votes, 400); diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 4edccce52..943b8265d 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ 
-595,4 +595,4 @@ BOOST_AUTO_TEST_CASE(get_account_history_operations) { } } -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/lottery_tests.cpp b/tests/tests/lottery_tests.cpp index b0f234e28..063c15c1f 100644 --- a/tests/tests/lottery_tests.cpp +++ b/tests/tests/lottery_tests.cpp @@ -63,6 +63,7 @@ BOOST_AUTO_TEST_CASE( create_lottery_asset_test ) lottery_options.end_date = db.head_block_time() + fc::minutes(5); lottery_options.ticket_price = asset(100); lottery_options.winning_tickets = { 5 * GRAPHENE_1_PERCENT, 5 * GRAPHENE_1_PERCENT, 5 * GRAPHENE_1_PERCENT, 10 * GRAPHENE_1_PERCENT, 10 * GRAPHENE_1_PERCENT, 10 * GRAPHENE_1_PERCENT, 10 * GRAPHENE_1_PERCENT, 10 * GRAPHENE_1_PERCENT, 10 * GRAPHENE_1_PERCENT }; + //lottery_options.winning_tickets = { 75 * GRAPHENE_1_PERCENT }; lottery_options.is_active = test_asset_id.instance.value % 2; lottery_options.ending_on_soldout = true; @@ -482,4 +483,64 @@ BOOST_AUTO_TEST_CASE( try_to_end_empty_lottery_test ) } } +BOOST_AUTO_TEST_CASE( lottery_winner_ticket_id_test ) +{ + try { + asset_id_type test_asset_id = db.get_index().get_next_id(); + INVOKE( create_lottery_asset_test ); + auto test_asset = test_asset_id(db); + for( int i = 1; i < 4; ++i ) { + transfer(account_id_type(), account_id_type(i), asset(2000000)); + } + for( int i = 1; i < 4; ++i ) { + if( i == 4 ) continue; + ticket_purchase_operation tpo; + tpo.buyer = account_id_type(i); + tpo.lottery = test_asset.id; + tpo.tickets_to_buy = 1; + tpo.amount = asset(100); + trx.operations.push_back(std::move(tpo)); + graphene::chain::test::set_expiration(db, trx); + PUSH_TX( db, trx, ~0 ); + trx.operations.clear(); + } + + for( int i = 1; i < 4; ++i ) { + if( i == 4 ) continue; + ticket_purchase_operation tpo; + tpo.buyer = account_id_type(i); + tpo.lottery = test_asset.id; + tpo.tickets_to_buy = 1; + tpo.amount = asset(100); + trx.operations.push_back(std::move(tpo)); + graphene::chain::test::set_expiration(db, trx); + PUSH_TX( db, trx, ~0 ); + trx.operations.clear(); + } + generate_block(); + test_asset = test_asset_id(db); + uint64_t creator_balance_before_end = db.get_balance( account_id_type(), asset_id_type() ).amount.value; + uint64_t jackpot = db.get_balance( test_asset.get_id() ).amount.value; + uint16_t winners_part = 0; + for( uint8_t win: test_asset.lottery_options->winning_tickets ) + winners_part += win; + + while( db.head_block_time() < ( test_asset.lottery_options->end_date ) ) + generate_block(); + + auto op_history = get_operation_history( account_id_type(1) ); //Can observe operation 79 to verify winner ticket number + for( auto h: op_history ) { + idump((h)); + } + + BOOST_CHECK( db.get_balance( test_asset.get_id() ).amount.value == 0 ); + uint64_t creator_recieved = db.get_balance( account_id_type(), asset_id_type() ).amount.value - creator_balance_before_end; + test_asset = test_asset_id(db); + BOOST_CHECK(jackpot * test_asset.lottery_options->benefactors[0].share / GRAPHENE_100_PERCENT == creator_recieved); + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/network_broadcast_api_tests.cpp b/tests/tests/network_broadcast_api_tests.cpp index 1e9d4c30d..f7a4116b8 100644 --- a/tests/tests/network_broadcast_api_tests.cpp +++ b/tests/tests/network_broadcast_api_tests.cpp @@ -292,6 +292,7 @@ BOOST_AUTO_TEST_CASE( check_failes_for_duplicates_in_pending_transactions_list ) try { ACTOR( alice ); + generate_blocks( HARDFORK_1000_TIME + 15 ); 
auto duplicate = make_sport_create_operation("SPORT1", "S1"); @@ -340,12 +341,12 @@ BOOST_AUTO_TEST_CASE( check_fails_for_several_transactions_with_duplicates_in_pe ACTORS((alice)) fc::ecc::private_key committee_key = init_account_priv_key; - + const account_object& moneyman = create_account("moneyman", init_account_pub_key); const asset_object& core = asset_id_type()(db); transfer(account_id_type()(db), moneyman, core.amount(1000000)); - + generate_blocks( HARDFORK_1000_TIME + 15 ); auto duplicate = make_sport_create_operation("SPORT1", "S1"); push_proposal(*this, moneyman, {make_sport_create_operation("SPORT2", "S2"), duplicate} ); @@ -480,4 +481,4 @@ BOOST_AUTO_TEST_CASE( broadcast_transaction_with_callback_test ) { } FC_LOG_AND_RETHROW() } -BOOST_AUTO_TEST_SUITE_END() +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
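The new tests/elasticsearch/main.cpp above pins down the pagination contract that history_api::get_account_history(account, stop, limit, start) is expected to honour whether it is served from the account_history index or from the elasticsearch plugin: results come back newest-first, at most `limit` entries, bounded above (inclusively) by `start` (0 meaning "from the most recent operation") and bounded below, exclusively, by `stop` (0 meaning "all the way back, including operation 0"). The following is a minimal, standalone C++ sketch of that contract, checked against the operation sets used in the test scenario (account "1.2.0" touches operations {0, 1, 3, 5}, "dan" touches {1, 2, 4, 6}). It is an illustrative model only; the container alias, helper function, and main() harness are invented for demonstration and are not part of the plugin or API code in this diff.

#include <cassert>
#include <cstdint>
#include <vector>

using op_ids = std::vector<uint64_t>;

// Model of get_account_history(account, stop, limit, start):
// newest-first, at most `limit` ids, id <= start (start == 0 means "newest"),
// id > stop, except that stop == 0 also admits operation 0 itself.
op_ids get_account_history_model(const op_ids& account_ops,
                                 uint64_t stop, uint32_t limit, uint64_t start)
{
    op_ids result;
    if (limit == 0)
        return result;
    const uint64_t upper = (start == 0) ? UINT64_MAX : start;
    // account_ops is kept in ascending id order, so walk it backwards (newest first)
    for (auto it = account_ops.rbegin(); it != account_ops.rend(); ++it) {
        if (*it > upper)
            continue;                                   // newer than the requested start
        if (*it < stop || (*it == stop && stop != 0))
            break;                                      // reached the stop marker
        result.push_back(*it);
        if (result.size() == limit)
            break;
    }
    return result;
}

int main()
{
    const op_ids a = {0, 1, 3, 5};   // operations touching "1.2.0" in the test scenario
    const op_ids b = {1, 2, 4, 6};   // operations touching "dan"

    assert((get_account_history_model(a, 0, 4, 9) == op_ids{5, 3, 1, 0}));   // f(A, 0, 4, 9)
    assert((get_account_history_model(a, 0, 4, 4) == op_ids{3, 1, 0}));      // f(A, 0, 4, 4)
    assert((get_account_history_model(a, 1, 5, 9) == op_ids{5, 3}));         // f(A, 1, 5, 9)
    assert((get_account_history_model(a, 1, 5, 2) == op_ids{}));             // f(A, 1, 5, 2)
    assert((get_account_history_model(b, 2, 4, 5) == op_ids{4}));            // f(B, 2, 4, 5)
    assert((get_account_history_model(b, 0, 4, 1) == op_ids{1}));            // f(B, 0, 4, 1)
    return 0;
}

Compiled on its own (for example with g++ -std=c++11), the asserts mirror a subset of the f(account, stop, limit, start) cases annotated in the elasticsearch_history_api test above.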