Merge bitcoin#19208: test: move sync_blocks and sync_mempool functions to test_framework.py

cc84460 test: move sync_blocks and sync_mempool functions to test_framework.py (Roy Shao)

Pull request description:

  This PR moves `sync_blocks` and `sync_mempool` out of `test_framework/util.py` into `test_framework/test_framework.py` so they can take contextual information from the test framework into account.

  * Update all callers to use the functions from `test_framework.py` (a usage sketch follows the diff below)
  * Remove the unused `**kwargs` parameter
  * Take `timeout_factor` into account when applying timeouts in the function implementations (see the sketch below)
  * Pass all tests by running `./test/functional/test_runner.py`

  fixes bitcoin#18930
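
  As a rough sketch of the `timeout_factor` point above (illustrative code only, not part of the change; the helper name `wait_until_synced` and the `get_tips` callback are invented for the example), a per-call timeout can be scaled by a framework-level factor before polling the nodes, which is what the relocated functions now do via `self.options.timeout_factor`:

      import time

      def wait_until_synced(get_tips, *, timeout=60, timeout_factor=1.0, wait=1):
          """Poll get_tips() until every node reports the same value, scaling
          the timeout by timeout_factor (hypothetical helper for illustration)."""
          timeout = int(timeout * timeout_factor)  # scale the deadline, as the moved functions do
          stop_time = time.time() + timeout
          while time.time() <= stop_time:
              tips = get_tips()  # e.g. [node.getbestblockhash() for node in nodes]
              if tips.count(tips[0]) == len(tips):
                  return
              time.sleep(wait)
          raise AssertionError("sync timed out after {}s".format(timeout))

  A slow environment can then supply a larger factor once (the framework exposes it on `self.options`, as the diff below shows) instead of every call site picking its own timeout.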

ACKs for top commit:
  MarcoFalke:
    ACK cc84460, reviewed with --color-moved=dimmed-zebra --color-moved-ws=ignore-all-space 💫

Tree-SHA512: a79b2a3fa842fc26a7aacb834bb2aea88b3049916c0b754e60002a77ce94bb5954e0ea3b436bf268e9295efb62d721dfef263a09339a55c684ac3fda388c275e
MarcoFalke committed Jun 21, 2020
2 parents c273308 + cc84460 commit 4b5c919
Showing 5 changed files with 56 additions and 66 deletions.
12 changes: 5 additions & 7 deletions test/functional/feature_backwards_compatibility.py
@@ -27,8 +27,6 @@
 from test_framework.util import (
     assert_equal,
-    sync_blocks,
-    sync_mempools,
 )


@@ -65,7 +63,7 @@ def setup_nodes(self):
     def run_test(self):
         self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())
 
-        sync_blocks(self.nodes)
+        self.sync_blocks()
 
         # Sanity check the test framework:
         res = self.nodes[self.num_nodes - 1].getblockchaininfo()
@@ -90,17 +88,17 @@ def run_test(self):
         # Create a confirmed transaction, receiving coins
         address = wallet.getnewaddress()
         self.nodes[0].sendtoaddress(address, 10)
-        sync_mempools(self.nodes)
+        self.sync_mempools()
         self.nodes[0].generate(1)
-        sync_blocks(self.nodes)
+        self.sync_blocks()
         # Create a conflicting transaction using RBF
         return_address = self.nodes[0].getnewaddress()
         tx1_id = self.nodes[1].sendtoaddress(return_address, 1)
         tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"]
         # Confirm the transaction
-        sync_mempools(self.nodes)
+        self.sync_mempools()
         self.nodes[0].generate(1)
-        sync_blocks(self.nodes)
+        self.sync_blocks()
         # Create another conflicting transaction using RBF
         tx3_id = self.nodes[1].sendtoaddress(return_address, 1)
         tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"]
4 changes: 2 additions & 2 deletions test/functional/rpc_getblockfilter.py
@@ -7,7 +7,7 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
     assert_equal, assert_is_hex_string, assert_raises_rpc_error,
-    connect_nodes, disconnect_nodes, sync_blocks
+    connect_nodes, disconnect_nodes
 )
 
 FILTER_TYPES = ["basic"]
@@ -30,7 +30,7 @@ def run_test(self):
 
         # Reorg node 0 to a new chain
         connect_nodes(self.nodes[0], 1)
-        sync_blocks(self.nodes)
+        self.sync_blocks()
 
         assert_equal(self.nodes[0].getblockcount(), 4)
         chain1_hashes = [self.nodes[0].getblockhash(block_height) for block_height in range(4)]
59 changes: 48 additions & 11 deletions test/functional/test_framework/test_framework.py
@@ -31,8 +31,6 @@
     disconnect_nodes,
     get_datadir_path,
     initialize_datadir,
-    sync_blocks,
-    sync_mempools,
 )


@@ -549,15 +547,54 @@ def join_network(self):
         connect_nodes(self.nodes[1], 2)
         self.sync_all()
 
-    def sync_blocks(self, nodes=None, **kwargs):
-        sync_blocks(nodes or self.nodes, **kwargs)
-
-    def sync_mempools(self, nodes=None, **kwargs):
-        sync_mempools(nodes or self.nodes, **kwargs)
-
-    def sync_all(self, nodes=None, **kwargs):
-        self.sync_blocks(nodes, **kwargs)
-        self.sync_mempools(nodes, **kwargs)
+    def sync_blocks(self, nodes=None, wait=1, timeout=60):
+        """
+        Wait until everybody has the same tip.
+        sync_blocks needs to be called with an rpc_connections set that has least
+        one node already synced to the latest, stable tip, otherwise there's a
+        chance it might return before all nodes are stably synced.
+        """
+        rpc_connections = nodes or self.nodes
+        timeout = int(timeout * self.options.timeout_factor)
+        stop_time = time.time() + timeout
+        while time.time() <= stop_time:
+            best_hash = [x.getbestblockhash() for x in rpc_connections]
+            if best_hash.count(best_hash[0]) == len(rpc_connections):
+                return
+            # Check that each peer has at least one connection
+            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
+            time.sleep(wait)
+        raise AssertionError("Block sync timed out after {}s:{}".format(
+            timeout,
+            "".join("\n {!r}".format(b) for b in best_hash),
+        ))
+
+    def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
+        """
+        Wait until everybody has the same transactions in their memory
+        pools
+        """
+        rpc_connections = nodes or self.nodes
+        timeout = int(timeout * self.options.timeout_factor)
+        stop_time = time.time() + timeout
+        while time.time() <= stop_time:
+            pool = [set(r.getrawmempool()) for r in rpc_connections]
+            if pool.count(pool[0]) == len(rpc_connections):
+                if flush_scheduler:
+                    for r in rpc_connections:
+                        r.syncwithvalidationinterfacequeue()
+                return
+            # Check that each peer has at least one connection
+            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
+            time.sleep(wait)
+        raise AssertionError("Mempool sync timed out after {}s:{}".format(
+            timeout,
+            "".join("\n {!r}".format(m) for m in pool),
+        ))
+
+    def sync_all(self, nodes=None):
+        self.sync_blocks(nodes)
+        self.sync_mempools(nodes)
 
     # Private helper methods. These should not be accessed by the subclass test scripts.

44 changes: 0 additions & 44 deletions test/functional/test_framework/util.py
@@ -444,50 +444,6 @@ def connect_nodes(from_connection, node_num):
     wait_until(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
 
 
-def sync_blocks(rpc_connections, *, wait=1, timeout=60):
-    """
-    Wait until everybody has the same tip.
-    sync_blocks needs to be called with an rpc_connections set that has least
-    one node already synced to the latest, stable tip, otherwise there's a
-    chance it might return before all nodes are stably synced.
-    """
-    stop_time = time.time() + timeout
-    while time.time() <= stop_time:
-        best_hash = [x.getbestblockhash() for x in rpc_connections]
-        if best_hash.count(best_hash[0]) == len(rpc_connections):
-            return
-        # Check that each peer has at least one connection
-        assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
-        time.sleep(wait)
-    raise AssertionError("Block sync timed out after {}s:{}".format(
-        timeout,
-        "".join("\n {!r}".format(b) for b in best_hash),
-    ))
-
-
-def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
-    """
-    Wait until everybody has the same transactions in their memory
-    pools
-    """
-    stop_time = time.time() + timeout
-    while time.time() <= stop_time:
-        pool = [set(r.getrawmempool()) for r in rpc_connections]
-        if pool.count(pool[0]) == len(rpc_connections):
-            if flush_scheduler:
-                for r in rpc_connections:
-                    r.syncwithvalidationinterfacequeue()
-            return
-        # Check that each peer has at least one connection
-        assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
-        time.sleep(wait)
-    raise AssertionError("Mempool sync timed out after {}s:{}".format(
-        timeout,
-        "".join("\n {!r}".format(m) for m in pool),
-    ))
-
-
 # Transaction/Block functions
 #############################
 
3 changes: 1 addition & 2 deletions test/functional/wallet_balance.py
@@ -12,7 +12,6 @@
     assert_equal,
     assert_raises_rpc_error,
     connect_nodes,
-    sync_blocks,
 )


@@ -264,7 +263,7 @@ def test_balances(*, fee_node_1=0):
         # Now confirm tx_orig
         self.restart_node(1, ['-persistmempool=0'])
         connect_nodes(self.nodes[0], 1)
-        sync_blocks(self.nodes)
+        self.sync_blocks()
         self.nodes[1].sendrawtransaction(tx_orig)
         self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
         self.sync_all()
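
For orientation, here is a minimal, hypothetical sketch (not part of this commit; the test class name and transaction amounts are invented) of how a functional test subclass uses the relocated helpers after this change, calling them as methods instead of importing them from util.py:

    #!/usr/bin/env python3
    from test_framework.test_framework import BitcoinTestFramework


    class ExampleSyncTest(BitcoinTestFramework):
        def set_test_params(self):
            self.num_nodes = 2

        def run_test(self):
            # Mine a block on node 0, then wait for node 1 to reach the same tip.
            self.nodes[0].generatetoaddress(1, self.nodes[0].getnewaddress())
            self.sync_blocks()      # was: sync_blocks(self.nodes)
            # Broadcast a transaction and wait until both mempools contain it.
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
            self.sync_mempools()    # was: sync_mempools(self.nodes)


    if __name__ == '__main__':
        ExampleSyncTest().main()

Because the helpers now live on the framework object, they can pick up contextual options such as timeout_factor without extra arguments at each call site.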
