Merge branch 'develop' into vop-numbering
Resolved conflicts:
- libraries/chain/include/graphene/chain/config.hpp : bumped GRAPHENE_CURRENT_DB_VERSION to current date
abitmore committed Apr 3, 2019
2 parents f03c207 + f74f450 commit da6c386
Showing 70 changed files with 5,671 additions and 317 deletions.
8 changes: 5 additions & 3 deletions README.md
@@ -55,9 +55,11 @@ We recommend building on Ubuntu 16.04 LTS (64-bit)
git submodule sync --recursive
git submodule update --init --recursive

**NOTE:** BitShares requires a [Boost](http://www.boost.org/) version in the range [1.57 - 1.65.1]. Versions earlier than
1.57 or newer than 1.65.1 are NOT supported. If your system's Boost version is newer, then you will need to manually build
an older version of Boost and specify it to CMake using `DBOOST_ROOT`.
**NOTE:** Versions of [Boost](http://www.boost.org/) 1.57 through 1.69 are supported. Newer versions may work, but
have not been tested. If your system came pre-installed with a version of Boost that you do not wish to use, you may
manually build your preferred version and use it with BitShares by specifying it on the CMake command line.

Example: ``cmake -DBOOST_ROOT=/path/to/boost .``

**NOTE:** BitShares requires a 64-bit operating system to build, and will not build on a 32-bit OS.

2 changes: 1 addition & 1 deletion docs
Submodule docs updated from 00bd50 to 0271e7
23 changes: 15 additions & 8 deletions libraries/app/database_api.cpp
@@ -25,6 +25,7 @@
#include <graphene/app/database_api.hpp>
#include <graphene/app/util.hpp>
#include <graphene/chain/get_config.hpp>
#include <graphene/chain/hardfork.hpp>

#include <fc/bloom_filter.hpp>

@@ -1291,14 +1292,13 @@ vector<call_order_object> database_api_impl::get_call_orders(const std::string&
{
FC_ASSERT( limit <= 300 );

const asset_id_type asset_a_id = get_asset_from_string(a)->id;
const auto& call_index = _db.get_index_type<call_order_index>().indices().get<by_price>();
const asset_object& mia = _db.get(asset_a_id);
price index_price = price::min(mia.bitasset_data(_db).options.short_backing_asset, mia.get_id());
const asset_object* mia = get_asset_from_string(a);
const auto& call_index = _db.get_index_type<call_order_index>().indices().get<by_collateral>();
price index_price = price::min( mia->bitasset_data(_db).options.short_backing_asset, mia->get_id() );

vector< call_order_object> result;
auto itr_min = call_index.lower_bound(index_price.min());
auto itr_max = call_index.lower_bound(index_price.max());
auto itr_min = call_index.lower_bound(index_price);
auto itr_max = call_index.upper_bound(index_price.max());
while( itr_min != itr_max && result.size() < limit )
{
result.emplace_back(*itr_min);
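
The hunk above switches `get_call_orders` from the `by_price` index to the `by_collateral` index and walks the matching range with `lower_bound`/`upper_bound`. Below is a minimal, self-contained sketch of that range-query pattern on an ordered Boost.MultiIndex container; the record and key types (`asset_id`, `collateral_ratio`) are simplified, hypothetical stand-ins for graphene's price-based collateral key, not the actual chain types.

```cpp
// Illustrative sketch only: the lower_bound/upper_bound range walk used above,
// on a simplified multi_index container.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/tuple/tuple.hpp>
#include <iostream>
#include <vector>

namespace bmi = boost::multi_index;

struct call_record
{
   int    asset_id;          // hypothetical debt-asset id
   double collateral_ratio;  // hypothetical sort key standing in for the collateral price
   int    order_id;
};

typedef bmi::multi_index_container<
   call_record,
   bmi::indexed_by<
      bmi::ordered_non_unique<
         bmi::composite_key<
            call_record,
            bmi::member<call_record, int,    &call_record::asset_id>,
            bmi::member<call_record, double, &call_record::collateral_ratio>
         >
      >
   >
> call_index_type;

int main()
{
   call_index_type calls;
   calls.insert( call_record{ 1, 1.2, 10 } );
   calls.insert( call_record{ 1, 1.8, 11 } );
   calls.insert( call_record{ 2, 1.1, 12 } );

   const int asset_a_id = 1;
   const unsigned limit = 300;

   // analogous to lower_bound(index_price) .. upper_bound(index_price.max()) above:
   // all call records of one asset, ordered by collateral ratio
   auto itr_min = calls.lower_bound( boost::make_tuple( asset_a_id ) );
   auto itr_max = calls.upper_bound( boost::make_tuple( asset_a_id ) );

   std::vector<call_record> result;
   while( itr_min != itr_max && result.size() < limit )
   {
      result.emplace_back( *itr_min );
      ++itr_min;
   }
   std::cout << result.size() << " call orders for asset " << asset_a_id << '\n'; // prints 2
}
```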
@@ -2058,10 +2058,12 @@ set<public_key_type> database_api::get_required_signatures( const signed_transac

set<public_key_type> database_api_impl::get_required_signatures( const signed_transaction& trx, const flat_set<public_key_type>& available_keys )const
{
bool allow_non_immediate_owner = ( _db.head_block_time() >= HARDFORK_CORE_584_TIME );
auto result = trx.get_required_signatures( _db.get_chain_id(),
available_keys,
[&]( account_id_type id ){ return &id(_db).active; },
[&]( account_id_type id ){ return &id(_db).owner; },
allow_non_immediate_owner,
_db.get_global_properties().parameters.max_authority_depth );
return result;
}
@@ -2077,6 +2079,7 @@ set<address> database_api::get_potential_address_signatures( const signed_transa

set<public_key_type> database_api_impl::get_potential_signatures( const signed_transaction& trx )const
{
bool allow_non_immediate_owner = ( _db.head_block_time() >= HARDFORK_CORE_584_TIME );
set<public_key_type> result;
trx.get_required_signatures(
_db.get_chain_id(),
@@ -2095,6 +2098,7 @@ set<public_key_type> database_api_impl::get_potential_signatures( const signed_t
result.insert(k);
return &auth;
},
allow_non_immediate_owner,
_db.get_global_properties().parameters.max_authority_depth
);

@@ -2142,10 +2146,12 @@ bool database_api::verify_authority( const signed_transaction& trx )const

bool database_api_impl::verify_authority( const signed_transaction& trx )const
{
bool allow_non_immediate_owner = ( _db.head_block_time() >= HARDFORK_CORE_584_TIME );
trx.verify_authority( _db.get_chain_id(),
[this]( account_id_type id ){ return &id(_db).active; },
[this]( account_id_type id ){ return &id(_db).owner; },
_db.get_global_properties().parameters.max_authority_depth );
allow_non_immediate_owner,
_db.get_global_properties().parameters.max_authority_depth );
return true;
}
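
Each of the signature-checking APIs changed in this file now derives an `allow_non_immediate_owner` flag from whether the chain has passed the CORE-584 hardfork. A minimal sketch of that gating pattern follows; the constant name and timestamps are made up for illustration, not graphene's actual values.

```cpp
// Illustrative sketch only: gating a behavior change on a hardfork timestamp,
// as done above with HARDFORK_CORE_584_TIME. Values below are hypothetical.
#include <cstdint>
#include <iostream>

typedef uint32_t time_point_sec; // seconds since epoch, standing in for fc::time_point_sec

const time_point_sec EXAMPLE_HARDFORK_TIME = 1554292800; // hypothetical fork time

bool allow_non_immediate_owner( time_point_sec head_block_time )
{
   // old behavior before the fork, new behavior at or after it
   return head_block_time >= EXAMPLE_HARDFORK_TIME;
}

int main()
{
   std::cout << std::boolalpha
             << allow_non_immediate_owner( EXAMPLE_HARDFORK_TIME - 1 ) << ' '   // false: pre-fork
             << allow_non_immediate_owner( EXAMPLE_HARDFORK_TIME )     << '\n'; // true: post-fork
}
```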

@@ -2167,7 +2173,8 @@ bool database_api_impl::verify_account_authority( const string& account_name_or_
{
graphene::chain::verify_authority(ops, keys,
[this]( account_id_type id ){ return &id(_db).active; },
[this]( account_id_type id ){ return &id(_db).owner; } );
[this]( account_id_type id ){ return &id(_db).owner; },
true );
}
catch (fc::exception& ex)
{
3 changes: 2 additions & 1 deletion libraries/chain/CMakeLists.txt
@@ -59,7 +59,7 @@ add_library( graphene_chain
protocol/fee_schedule.cpp
protocol/confidential.cpp
protocol/vote.cpp

protocol/htlc.cpp
genesis_state.cpp
get_config.cpp

@@ -78,6 +78,7 @@ add_library( graphene_chain
vesting_balance_evaluator.cpp
withdraw_permission_evaluator.cpp
worker_evaluator.cpp
htlc_evaluator.cpp
confidential_evaluator.cpp
special_authority.cpp
buyback.cpp
66 changes: 53 additions & 13 deletions libraries/chain/asset_evaluator.cpp
@@ -35,6 +35,20 @@
#include <locale>

namespace graphene { namespace chain {
namespace detail {
// TODO review and remove code below and links to it after hf_1268
void check_asset_options_hf_1268(const fc::time_point_sec& block_time, const asset_options& options)
{
if( block_time < HARDFORK_1268_TIME )
{
FC_ASSERT( !options.extensions.value.reward_percent.valid(),
"Asset extension reward percent is only available after HARDFORK_1268_TIME!");

FC_ASSERT( !options.extensions.value.whitelist_market_fee_sharing.valid(),
"Asset extension whitelist_market_fee_sharing is only available after HARDFORK_1268_TIME!");
}
}
}

void_result asset_create_evaluator::do_evaluate( const asset_create_operation& op )
{ try {
@@ -45,6 +59,8 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o
FC_ASSERT( op.common_options.whitelist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
FC_ASSERT( op.common_options.blacklist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );

detail::check_asset_options_hf_1268(d.head_block_time(), op.common_options);

// Check that all authorities do exist
for( auto id : op.common_options.whitelist_authorities )
d.get_object(id);
@@ -277,6 +293,8 @@ void_result asset_update_evaluator::do_evaluate(const asset_update_operation& o)
validate_new_issuer( d, a, *o.new_issuer );
}

detail::check_asset_options_hf_1268(d.head_block_time(), o.new_options);

if( (d.head_block_time() < HARDFORK_572_TIME) || (a.dynamic_asset_data_id(d).current_supply != 0) )
{
// new issuer_permissions must be subset of old issuer permissions
@@ -638,7 +656,7 @@ static bool update_bitasset_object_options(
const asset_update_bitasset_operation& op, database& db,
asset_bitasset_data_object& bdo, const asset_object& asset_to_update )
{
const fc::time_point_sec& next_maint_time = db.get_dynamic_global_properties().next_maintenance_time;
const fc::time_point_sec next_maint_time = db.get_dynamic_global_properties().next_maintenance_time;
bool after_hf_core_868_890 = ( next_maint_time > HARDFORK_CORE_868_890_TIME );

// If the minimum number of feeds to calculate a median has changed, we need to recalculate the median
@@ -689,7 +707,7 @@
if( should_update_feeds )
{
const auto old_feed = bdo.current_feed;
bdo.update_median_feeds( db.head_block_time() );
bdo.update_median_feeds( db.head_block_time(), next_maint_time );

// TODO review and refactor / cleanup after hard fork:
// 1. if hf_core_868_890 and core-935 occurred at same time
@@ -766,8 +784,9 @@ void_result asset_update_feed_producers_evaluator::do_apply(const asset_update_f
{ try {
database& d = db();
const auto head_time = d.head_block_time();
const auto next_maint_time = d.get_dynamic_global_properties().next_maintenance_time;
const asset_bitasset_data_object& bitasset_to_update = asset_to_update->bitasset_data(d);
d.modify( bitasset_to_update, [&o,head_time](asset_bitasset_data_object& a) {
d.modify( bitasset_to_update, [&o,head_time,next_maint_time](asset_bitasset_data_object& a) {
//This is tricky because I have a set of publishers coming in, but a map of publisher to feed is stored.
//I need to update the map such that the keys match the new publishers, but not munge the old price feeds from
//publishers who are being kept.
@@ -791,7 +810,7 @@
{
a.feeds[acc];
}
a.update_median_feeds( head_time );
a.update_median_feeds( head_time, next_maint_time );
});
// Process margin calls, allow black swan, not for a new limit order
d.check_call_orders( *asset_to_update, true, false, &bitasset_to_update );
@@ -969,27 +988,48 @@ void_result asset_publish_feeds_evaluator::do_apply(const asset_publish_feed_ope
{ try {

database& d = db();
const auto head_time = d.head_block_time();
const auto next_maint_time = d.get_dynamic_global_properties().next_maintenance_time;

const asset_object& base = *asset_ptr;
const asset_bitasset_data_object& bad = *bitasset_ptr;

auto old_feed = bad.current_feed;
// Store medians for this asset
d.modify(bad , [&o,&d](asset_bitasset_data_object& a) {
a.feeds[o.publisher] = make_pair(d.head_block_time(), o.feed);
a.update_median_feeds(d.head_block_time());
d.modify( bad , [&o,head_time,next_maint_time](asset_bitasset_data_object& a) {
a.feeds[o.publisher] = make_pair( head_time, o.feed );
a.update_median_feeds( head_time, next_maint_time );
});

if( !(old_feed == bad.current_feed) )
{
if( bad.has_settlement() ) // implies head_block_time > HARDFORK_CORE_216_TIME
// Check whether need to revive the asset and proceed if need
if( bad.has_settlement() // has globally settled, implies head_block_time > HARDFORK_CORE_216_TIME
&& !bad.current_feed.settlement_price.is_null() ) // has a valid feed
{
bool should_revive = false;
const auto& mia_dyn = base.dynamic_asset_data_id(d);
if( !bad.current_feed.settlement_price.is_null()
&& ( mia_dyn.current_supply == 0
|| ~price::call_price(asset(mia_dyn.current_supply, o.asset_id),
asset(bad.settlement_fund, bad.options.short_backing_asset),
bad.current_feed.maintenance_collateral_ratio ) < bad.current_feed.settlement_price ) )
if( mia_dyn.current_supply == 0 ) // if current supply is zero, revive the asset
should_revive = true;
else // if current supply is not zero, when collateral ratio of settlement fund is greater than MCR, revive the asset
{
if( next_maint_time <= HARDFORK_CORE_1270_TIME )
{
// before core-1270 hard fork, calculate call_price and compare to median feed
if( ~price::call_price( asset(mia_dyn.current_supply, o.asset_id),
asset(bad.settlement_fund, bad.options.short_backing_asset),
bad.current_feed.maintenance_collateral_ratio ) < bad.current_feed.settlement_price )
should_revive = true;
}
else
{
// after core-1270 hard fork, calculate collateralization and compare to maintenance_collateralization
if( price( asset( bad.settlement_fund, bad.options.short_backing_asset ),
asset( mia_dyn.current_supply, o.asset_id ) ) > bad.current_maintenance_collateralization )
should_revive = true;
}
}
if( should_revive )
d.revive_bitasset(base);
}
// Process margin calls, allow black swan, not for a new limit order
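
The revive check added above compares the settlement fund's collateralization against the maintenance requirement once the CORE-1270 fork is active (before the fork it compares a derived call price against the median settlement price). The following is a worked numeric sketch of the post-fork branch only, using plain doubles instead of graphene's integer price ratios; every figure is made up for illustration.

```cpp
// Worked sketch (plain doubles, not graphene's integer price arithmetic): the
// post-CORE-1270 revive test, i.e. "collateral ratio of the settlement fund
// exceeds the maintenance collateral ratio".
#include <iostream>

int main()
{
   const double settlement_fund   = 1800.0; // hypothetical collateral held in the fund (core asset)
   const double current_supply    = 1000.0; // hypothetical outstanding smart-asset supply
   const double feed_price        = 1.0;    // hypothetical median feed: 1 core per smart asset
   const double maintenance_ratio = 1.75;   // MCR of 1750 permille, written as a plain ratio

   // collateralization of the settlement fund, measured against the median feed
   const double collateralization = settlement_fund / (current_supply * feed_price);

   const bool should_revive = (current_supply == 0.0) || (collateralization > maintenance_ratio);

   std::cout << "collateralization = " << collateralization
             << ", revive = " << std::boolalpha << should_revive << '\n';
   // prints: collateralization = 1.8, revive = true
}
```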
25 changes: 14 additions & 11 deletions libraries/chain/asset_object.cpp
@@ -23,6 +23,7 @@
*/
#include <graphene/chain/asset_object.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/hardfork.hpp>

#include <fc/uint128.hpp>

@@ -43,16 +44,10 @@ share_type asset_bitasset_data_object::max_force_settlement_volume(share_type cu
return volume.to_uint64();
}

/******
* @brief calculate the median feed
*
* This calculates the median feed. It sets the current_feed_publication_time
* and current_feed member variables
*
* @param current_time the time to use in the calculations
*/
void graphene::chain::asset_bitasset_data_object::update_median_feeds(time_point_sec current_time)
void graphene::chain::asset_bitasset_data_object::update_median_feeds( time_point_sec current_time,
time_point_sec next_maintenance_time )
{
bool after_core_hardfork_1270 = ( next_maintenance_time > HARDFORK_CORE_1270_TIME ); // call price caching issue
current_feed_publication_time = current_time;
vector<std::reference_wrapper<const price_feed>> current_feeds;
// find feeds that were alive at current_time
@@ -73,13 +68,18 @@ void graphene::chain::asset_bitasset_data_object::update_median_feeds(time_point
feed_cer_updated = false; // new median cer is null, won't update asset_object anyway, set to false for better performance
current_feed_publication_time = current_time;
current_feed = price_feed();
if( after_core_hardfork_1270 )
current_maintenance_collateralization = price();
return;
}
if( current_feeds.size() == 1 )
{
if( current_feed.core_exchange_rate != current_feeds.front().get().core_exchange_rate )
feed_cer_updated = true;
current_feed = std::move(current_feeds.front());
current_feed = current_feeds.front();
// Note: perhaps can defer updating current_maintenance_collateralization for better performance
if( after_core_hardfork_1270 )
current_maintenance_collateralization = current_feed.maintenance_collateralization();
return;
}

@@ -100,6 +100,9 @@ void graphene::chain::asset_bitasset_data_object::update_median_feeds(time_point
if( current_feed.core_exchange_rate != median_feed.core_exchange_rate )
feed_cer_updated = true;
current_feed = median_feed;
// Note: perhaps can defer updating current_maintenance_collateralization for better performance
if( after_core_hardfork_1270 )
current_maintenance_collateralization = current_feed.maintenance_collateralization();
}
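
`update_median_feeds` picks, for each feed field, the median value across the feeds that are still within their lifetime, and after CORE-1270 also caches `current_maintenance_collateralization`. Below is a simplified sketch of that per-field median using `std::nth_element` on one hypothetical field; the struct and field are illustrative stand-ins, not graphene's `price_feed`.

```cpp
// Simplified sketch (hypothetical types): picking the median of one feed field with
// std::nth_element, the same idea update_median_feeds applies per price_feed field.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct simple_feed
{
   uint16_t maintenance_collateral_ratio; // permille, e.g. 1750 == 175%
};

uint16_t median_mcr( std::vector<simple_feed> feeds ) // by value: nth_element reorders the copy
{
   // middle position; with an even count this sketch picks the upper of the two middle values
   const size_t median_index = feeds.size() / 2;
   std::nth_element( feeds.begin(), feeds.begin() + median_index, feeds.end(),
                     []( const simple_feed& a, const simple_feed& b )
                     { return a.maintenance_collateral_ratio < b.maintenance_collateral_ratio; } );
   return feeds[median_index].maintenance_collateral_ratio;
}

int main()
{
   std::vector<simple_feed> feeds{ {1750}, {2000}, {1600}, {1850}, {1700} };
   std::cout << median_mcr( feeds ) << '\n'; // prints 1750
}
```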


@@ -157,7 +160,7 @@ asset asset_object::amount_from_string(string amount_string) const
satoshis *= -1;

return amount(satoshis);
} FC_CAPTURE_AND_RETHROW( (amount_string) ) }
} FC_CAPTURE_AND_RETHROW( (amount_string) ) }

string asset_object::amount_to_string(share_type amount) const
{
4 changes: 4 additions & 0 deletions libraries/chain/committee_member_evaluator.cpp
@@ -24,6 +24,7 @@
#include <graphene/chain/committee_member_evaluator.hpp>
#include <graphene/chain/committee_member_object.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/hardfork.hpp>
#include <graphene/chain/account_object.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <graphene/chain/protocol/vote.hpp>
@@ -75,6 +76,9 @@ void_result committee_member_update_global_parameters_evaluator::do_evaluate(con
{ try {
FC_ASSERT(trx_state->_is_proposed_trx);

FC_ASSERT( db().head_block_time() > HARDFORK_CORE_1468_TIME || !o.new_parameters.extensions.value.updatable_htlc_options.valid(),
"Unable to set HTLC parameters until hardfork." );

return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
