From 688190c8af4f7bbacac4826e000e72351d627cb9 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Wed, 20 Jun 2018 21:24:29 -0400 Subject: [PATCH 01/29] [qa] mininode: Expose connection state through is_connected --- test/functional/p2p_leak.py | 6 +- test/functional/p2p_timeouts.py | 18 +++--- test/functional/test_framework/mininode.py | 68 ++++++++++++---------- 3 files changed, 48 insertions(+), 44 deletions(-) diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index 2d9ff066cd859..380f8ac46ad41 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -118,11 +118,11 @@ def run_test(self): time.sleep(5) #This node should have been banned - assert no_version_bannode.state != "connected" + assert not no_version_bannode.is_connected # These nodes should have been disconnected - assert unsupported_service_bit5_node.state != "connected" - assert unsupported_service_bit7_node.state != "connected" + assert not unsupported_service_bit5_node.is_connected + assert not unsupported_service_bit7_node.is_connected self.nodes[0].disconnect_p2ps() diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py index 23a252499a897..01f5e74c7a74c 100755 --- a/test/functional/p2p_timeouts.py +++ b/test/functional/p2p_timeouts.py @@ -47,9 +47,9 @@ def run_test(self): sleep(1) - assert no_verack_node.connected - assert no_version_node.connected - assert no_send_node.connected + assert no_verack_node.is_connected + assert no_version_node.is_connected + assert no_send_node.is_connected no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) @@ -58,18 +58,18 @@ def run_test(self): assert "version" in no_verack_node.last_message - assert no_verack_node.connected - assert no_version_node.connected - assert no_send_node.connected + assert no_verack_node.is_connected + assert no_version_node.is_connected + assert no_send_node.is_connected no_verack_node.send_message(msg_ping()) 
no_version_node.send_message(msg_ping()) sleep(31) - assert not no_verack_node.connected - assert not no_version_node.connected - assert not no_send_node.connected + assert not no_verack_node.is_connected + assert not no_version_node.is_connected + assert not no_send_node.is_connected if __name__ == '__main__': TimeoutsTest().main() diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index 160be8fc5431a..084a2af37a850 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -78,6 +78,12 @@ def __init__(self): super().__init__(map=mininode_socket_map) + self._conn_open = False + + @property + def is_connected(self): + return self._conn_open + def peer_connect(self, dstaddr, dstport, net="regtest"): self.dstaddr = dstaddr self.dstport = dstport @@ -85,7 +91,7 @@ def peer_connect(self, dstaddr, dstport, net="regtest"): self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) self.sendbuf = b"" self.recvbuf = b"" - self.state = "connecting" + self._asyncore_pre_connection = True self.network = net self.disconnect = False @@ -98,22 +104,23 @@ def peer_connect(self, dstaddr, dstport, net="regtest"): def peer_disconnect(self): # Connection could have already been closed by other end. 
- if self.state == "connected": - self.disconnect_node() + if self.is_connected: + self.disconnect = True # Signal asyncore to disconnect # Connection and disconnection methods def handle_connect(self): """asyncore callback when a connection is opened.""" - if self.state != "connected": + if not self.is_connected: logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport)) - self.state = "connected" + self._conn_open = True + self._asyncore_pre_connection = False self.on_open() def handle_close(self): """asyncore callback when a connection is closed.""" logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport)) - self.state = "closed" + self._conn_open = False self.recvbuf = b"" self.sendbuf = b"" try: @@ -122,13 +129,6 @@ def handle_close(self): pass self.on_close() - def disconnect_node(self): - """Disconnect the p2p connection. - - Called by the test logic thread. Causes the p2p connection - to be disconnected on the next iteration of the asyncore loop.""" - self.disconnect = True - # Socket read methods def handle_read(self): @@ -184,9 +184,8 @@ def on_message(self, message): def writable(self): """asyncore method to determine whether the handle_write() callback should be called on the next loop.""" with mininode_lock: - pre_connection = self.state == "connecting" length = len(self.sendbuf) - return (length > 0 or pre_connection) + return length > 0 or self._asyncore_pre_connection def handle_write(self): """asyncore callback when data should be written to the socket.""" @@ -194,7 +193,7 @@ def handle_write(self): # asyncore does not expose socket connection, only the first read/write # event, thus we must check connection manually here to know when we # actually connect - if self.state == "connecting": + if self._asyncore_pre_connection: self.handle_connect() if not self.writable(): return @@ -206,26 +205,17 @@ def handle_write(self): return self.sendbuf = self.sendbuf[sent:] - def send_message(self, message, pushbuf=False): + 
def send_message(self, message): """Send a P2P message over the socket. This method takes a P2P payload, builds the P2P header and adds the message to the send buffer to be sent over the socket.""" - if self.state != "connected" and not pushbuf: - raise IOError('Not connected, no pushbuf') + if not self.is_connected: + raise IOError('Not connected') self._log_message("send", message) - command = message.command - data = message.serialize() - tmsg = MAGIC_BYTES[self.network] - tmsg += command - tmsg += b"\x00" * (12 - len(command)) - tmsg += struct.pack(" Date: Thu, 3 Jun 2021 12:38:06 -0300 Subject: [PATCH 02/29] qa: Avoid start/stop of the network thread mid-test Adaptation of btc@fa87da2f172ae2e6dc15e9ed156a3564a8ecfbdd from MarkoFalke --- test/functional/example_test.py | 10 - test/functional/feature_block.py | 5 +- test/functional/feature_cltv.py | 4 - test/functional/feature_nulldummy.py | 3 +- test/functional/mining_pos_coldStaking.py | 3 - test/functional/p2p_feefilter.py | 5 +- test/functional/p2p_fingerprint.py | 3 - test/functional/p2p_invalid_block.py | 3 +- test/functional/p2p_invalid_tx.py | 4 +- test/functional/p2p_leak.py | 9 +- test/functional/p2p_mempool.py | 1 - test/functional/p2p_timeouts.py | 4 +- test/functional/p2p_unrequested_blocks.py | 10 +- test/functional/test_framework/mininode.py | 178 ++++++------------ .../test_framework/test_framework.py | 8 + test/functional/test_framework/test_node.py | 10 +- 16 files changed, 82 insertions(+), 178 deletions(-) diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 757a065dd2b6e..a57a64d190ff5 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -21,8 +21,6 @@ mininode_lock, msg_block, msg_getdata, - network_thread_join, - network_thread_start, ) from test_framework.test_framework import PivxTestFramework from test_framework.util import ( @@ -135,9 +133,6 @@ def run_test(self): # Create P2P connections to two of the nodes 
self.nodes[0].add_p2p_connection(BaseNode()) - # Start up network handling in another thread. This needs to be called - # after the P2P connections have been created. - network_thread_start() # wait_for_verack ensures that the P2P connection is fully up. self.nodes[0].p2p.wait_for_verack() @@ -189,14 +184,9 @@ def run_test(self): connect_nodes(self.nodes[1], 2) self.log.info("Add P2P connection to node2") - # We can't add additional P2P connections once the network thread has started. Disconnect the connection - # to node0, wait for the network thread to terminate, then connect to node2. This is specific to - # the current implementation of the network thread and may be improved in future. self.nodes[0].disconnect_p2ps() - network_thread_join() self.nodes[2].add_p2p_connection(BaseNode()) - network_thread_start() self.nodes[2].p2p.wait_for_verack() self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us") diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 5c813e451b59f..b1ec5a48672fd 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -21,7 +21,7 @@ uint256_from_compact, uint256_from_str, ) -from test_framework.mininode import P2PDataStore, network_thread_start, network_thread_join +from test_framework.mininode import P2PDataStore from test_framework.script import ( CScript, MAX_SCRIPT_ELEMENT_SIZE, @@ -84,7 +84,6 @@ def run_test(self): node = self.nodes[0] # convenience reference to the node # reconnect_p2p() expects the network thread to be running self.log.info("Starting network thread...") - network_thread_start() self.reconnect_p2p() self.block_heights = {} @@ -1207,10 +1206,8 @@ def reconnect_p2p(self): The node gets disconnected several times in this test. 
This helper method reconnects the p2p and restarts the network thread.""" - network_thread_join() self.nodes[0].disconnect_p2ps() self.nodes[0].add_p2p_connection(P2PDataStore()) - network_thread_start() self.nodes[0].p2p.wait_for_verack() def send_blocks(self, blocks, success=True, reject_reason=None, reconnect=False, timeout=60): diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py index d80121c703a23..a0250fe6e7e5a 100755 --- a/test/functional/feature_cltv.py +++ b/test/functional/feature_cltv.py @@ -67,10 +67,6 @@ def set_test_params(self): def run_test(self): self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() - - # wait_for_verack ensures that the P2P connection is fully up. self.nodes[0].p2p.wait_for_verack() self.log.info("Mining %d blocks", CLTV_HEIGHT - 2) diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py index ba420bda02b36..56a40c1217bc7 100755 --- a/test/functional/feature_nulldummy.py +++ b/test/functional/feature_nulldummy.py @@ -15,7 +15,7 @@ from test_framework.test_framework import PivxTestFramework from test_framework.util import * -from test_framework.mininode import CTransaction, network_thread_start +from test_framework.messages import CTransaction from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment from test_framework.script import CScript from io import BytesIO @@ -48,7 +48,6 @@ def run_test(self): self.address = self.nodes[0].getnewaddress() self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address]) - network_thread_start() self.coinbase_blocks = self.nodes[0].generate(2) # Block 2 coinbase_txid = [] for i in self.coinbase_blocks: diff --git a/test/functional/mining_pos_coldStaking.py b/test/functional/mining_pos_coldStaking.py index fdb93a65daa6b..b0ff88818085b 100755 --- a/test/functional/mining_pos_coldStaking.py +++ b/test/functional/mining_pos_coldStaking.py @@ -8,7 +8,6 @@ from time import sleep 
from test_framework.messages import CTransaction, CTxIn, CTxOut, COIN, COutPoint -from test_framework.mininode import network_thread_start from test_framework.pivx_node import PivxTestNode from test_framework.script import CScript, OP_CHECKSIG from test_framework.test_framework import PivxTestFramework @@ -52,8 +51,6 @@ def init_test(self): self.test_nodes.append(PivxTestNode()) self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i)) - network_thread_start() # Start up network handling in another thread - # Let the test nodes get in sync for i in range(self.num_nodes): self.test_nodes[i].wait_for_verack() diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index e7e77429a638b..4821f1d6c1546 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -47,9 +47,8 @@ def run_test(self): node1.generate(1) self.sync_blocks() - # Setup the p2p connections and start up the network thread. - self.nodes[0].add_p2p_connection(TestNode()) - network_thread_start() + # Setup the p2p connections + self.nodes[0].add_p2p_connection(TestP2PConn()) self.nodes[0].p2p.wait_for_verack() # Test that invs are received for all txs at feerate of 20 sat/byte diff --git a/test/functional/p2p_fingerprint.py b/test/functional/p2p_fingerprint.py index d95c653bfae34..90279d9a91562 100755 --- a/test/functional/p2p_fingerprint.py +++ b/test/functional/p2p_fingerprint.py @@ -18,7 +18,6 @@ msg_block, msg_getdata, msg_getheaders, - network_thread_start, wait_until, ) from test_framework.test_framework import PivxTestFramework @@ -76,8 +75,6 @@ def last_header_equals(self, expected_hash, node): # last month but that have over a month's worth of work are also withheld. 
def run_test(self): node0 = self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() node0.wait_for_verack() # Set node time to 60 days ago diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py index 6aef35f0e35b8..a7de6e29b3431 100755 --- a/test/functional/p2p_invalid_block.py +++ b/test/functional/p2p_invalid_block.py @@ -14,7 +14,7 @@ from test_framework.blocktools import create_block, create_coinbase, create_transaction from test_framework.messages import COIN -from test_framework.mininode import network_thread_start, P2PDataStore +from test_framework.mininode import P2PDataStore from test_framework.script import ( CScript, OP_TRUE, @@ -35,7 +35,6 @@ def run_test(self): # Add p2p connection to node0 node = self.nodes[0] # convenience reference to the node node.add_p2p_connection(P2PDataStore()) - network_thread_start() node.p2p.wait_for_verack() best_block = node.getblock(node.getbestblockhash()) diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index bb15f25d82d5d..9705f7fb6788c 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -13,7 +13,7 @@ CTxIn, CTxOut, ) -from test_framework.mininode import network_thread_start, P2PDataStore, network_thread_join +from test_framework.mininode import P2PDataStore, network_thread_join from test_framework.script import ( CScript, OP_NOTIF, @@ -37,7 +37,6 @@ def bootstrap_p2p(self, *, num_connections=1): Helper to connect and wait for version handshake.""" for _ in range(num_connections): self.nodes[0].add_p2p_connection(P2PDataStore()) - network_thread_start() self.nodes[0].p2p.wait_for_verack() def reconnect_p2p(self, **kwargs): @@ -46,7 +45,6 @@ def reconnect_p2p(self, **kwargs): The node gets disconnected several times in this test. 
This helper method reconnects the p2p and restarts the network thread.""" self.nodes[0].disconnect_p2ps() - network_thread_join() self.bootstrap_p2p(**kwargs) def new_spend_tx(self, prev_hash, prev_n, values): diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index 380f8ac46ad41..10b4b071c4db0 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -103,8 +103,6 @@ def run_test(self): unsupported_service_bit5_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK) unsupported_service_bit7_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK) - network_thread_start() - wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock) wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock) wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock) @@ -126,9 +124,8 @@ def run_test(self): self.nodes[0].disconnect_p2ps() - # Wait until all connections are closed and the network thread has terminated + # Wait until all connections are closed wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0) - network_thread_join() # Make sure no unexpected messages came in assert(no_version_bannode.unexpected_msg == False) @@ -143,11 +140,9 @@ def run_test(self): allowed_service_bit5_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK) allowed_service_bit7_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK) - # Network thread stopped when all previous P2PInterfaces disconnected. 
Restart it - network_thread_start() - wait_until(lambda: allowed_service_bit5_node.message_count["verack"], lock=mininode_lock) wait_until(lambda: allowed_service_bit7_node.message_count["verack"], lock=mininode_lock) + if __name__ == '__main__': P2PLeakTest().main() diff --git a/test/functional/p2p_mempool.py b/test/functional/p2p_mempool.py index 9f05a560fa5db..ab204483b10f2 100755 --- a/test/functional/p2p_mempool.py +++ b/test/functional/p2p_mempool.py @@ -21,7 +21,6 @@ def set_test_params(self): def run_test(self): # Add a p2p connection self.nodes[0].add_p2p_connection(P2PInterface()) - network_thread_start() self.nodes[0].p2p.wait_for_verack() #request mempool diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py index 01f5e74c7a74c..dc4cd9ab91c61 100755 --- a/test/functional/p2p_timeouts.py +++ b/test/functional/p2p_timeouts.py @@ -38,13 +38,11 @@ def set_test_params(self): self.num_nodes = 1 def run_test(self): - # Setup the p2p connections and start up the network thread. + # Setup the p2p connections no_verack_node = self.nodes[0].add_p2p_connection(TestNode()) no_version_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False) no_send_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False) - network_thread_start() - sleep(1) assert no_verack_node.is_connected diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py index 3b873d4efed54..cb47981ec48c8 100755 --- a/test/functional/p2p_unrequested_blocks.py +++ b/test/functional/p2p_unrequested_blocks.py @@ -77,15 +77,11 @@ def setup_network(self): self.setup_nodes() def run_test(self): - # Setup the p2p connections and start up the network thread. 
+ # Setup the p2p connections # test_node connects to node0 (not whitelisted) test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # min_work_node connects to node1 (whitelisted) min_work_node = self.nodes[1].add_p2p_connection(P2PInterface()) - - network_thread_start() - - # Test logic begins here test_node.wait_for_verack() min_work_node.wait_for_verack() @@ -208,10 +204,8 @@ def run_test(self): self.nodes[0].disconnect_p2ps() self.nodes[1].disconnect_p2ps() - network_thread_join() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) - network_thread_start() test_node.wait_for_verack() test_node.send_message(msg_block(block_h1f)) @@ -297,8 +291,6 @@ def run_test(self): self.nodes[0].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() test_node.wait_for_verack() # We should have failed reorg and switched back to 290 (but have block 291) diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index 084a2af37a850..b871df3432402 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -13,11 +13,10 @@ P2PInterface: A high-level interface object for communicating to a node over P2P P2PDataStore: A p2p interface class that keeps a store of transactions and blocks and can respond correctly to getdata and getheaders messages""" -import asyncore +import asyncio from collections import defaultdict from io import BytesIO import logging -import socket import struct import sys import threading @@ -58,7 +57,8 @@ "regtest": b"\xa1\xcf\x7e\xac", # regtest } -class P2PConnection(asyncore.dispatcher): + +class P2PConnection(asyncio.Protocol): """A low-level connection object to a node's P2P interface. 
This class is responsible for: @@ -72,68 +72,59 @@ class P2PConnection(asyncore.dispatcher): sub-classed and the on_message() callback overridden.""" def __init__(self): - # All P2PConnections must be created before starting the NetworkThread. - # assert that the network thread is not running. - assert not network_thread_running() - - super().__init__(map=mininode_socket_map) - - self._conn_open = False + # The underlying transport of the connection. + # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe + self._transport = None @property def is_connected(self): - return self._conn_open + return self._transport is not None def peer_connect(self, dstaddr, dstport, net="regtest"): + assert not self.is_connected self.dstaddr = dstaddr self.dstport = dstport - self.create_socket(socket.AF_INET, socket.SOCK_STREAM) - self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - self.sendbuf = b"" + # The initial message to send after the connection was made: + self.on_connection_send_msg = None self.recvbuf = b"" - self._asyncore_pre_connection = True self.network = net - self.disconnect = False - logger.debug('Connecting to PIVX Node: %s:%d' % (self.dstaddr, self.dstport)) - try: - self.connect((dstaddr, dstport)) - except: - self.handle_close() + loop = NetworkThread.network_event_loop + conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport) + conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe) + return conn_gen def peer_disconnect(self): # Connection could have already been closed by other end. 
- if self.is_connected: - self.disconnect = True # Signal asyncore to disconnect + NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort()) # Connection and disconnection methods - def handle_connect(self): - """asyncore callback when a connection is opened.""" - if not self.is_connected: - logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport)) - self._conn_open = True - self._asyncore_pre_connection = False - self.on_open() - - def handle_close(self): - """asyncore callback when a connection is closed.""" - logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport)) - self._conn_open = False + def connection_made(self, transport): + """asyncio callback when a connection is opened.""" + assert not self._transport + logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport)) + self._transport = transport + if self.on_connection_send_msg: + self.send_message(self.on_connection_send_msg) + self.on_connection_send_msg = None # Never used again + self.on_open() + + def connection_lost(self, exc): + """asyncio callback when a connection is closed.""" + if exc: + logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc)) + else: + logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport)) + self._transport = None self.recvbuf = b"" - self.sendbuf = b"" - try: - self.close() - except: - pass self.on_close() # Socket read methods - def handle_read(self): - """asyncore callback when data is read from the socket.""" - t = self.recv(8192) + def data_received(self, t): + """asyncio callback when data is read from the socket.""" if len(t) > 0: self.recvbuf += t self._on_data() @@ -181,30 +172,6 @@ def on_message(self, message): # Socket write methods - def writable(self): - """asyncore method to determine whether the handle_write() callback should be called on the next loop.""" - with mininode_lock: - length = len(self.sendbuf) 
- return length > 0 or self._asyncore_pre_connection - - def handle_write(self): - """asyncore callback when data should be written to the socket.""" - with mininode_lock: - # asyncore does not expose socket connection, only the first read/write - # event, thus we must check connection manually here to know when we - # actually connect - if self._asyncore_pre_connection: - self.handle_connect() - if not self.writable(): - return - - try: - sent = self.send(self.sendbuf) - except: - self.handle_close() - return - self.sendbuf = self.sendbuf[sent:] - def send_message(self, message): """Send a P2P message over the socket. @@ -214,15 +181,7 @@ def send_message(self, message): raise IOError('Not connected') self._log_message("send", message) tmsg = self._build_message(message) - with mininode_lock: - if len(self.sendbuf) == 0: - try: - sent = self.send(tmsg) - self.sendbuf = tmsg[sent:] - except BlockingIOError: - self.sendbuf = tmsg - else: - self.sendbuf += tmsg + NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.write(tmsg)) # Class utility methods @@ -276,7 +235,7 @@ def __init__(self): self.nServices = 0 def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs): - super().peer_connect(*args, **kwargs) + create_conn = super().peer_connect(*args, **kwargs) if send_version: # Send a version msg @@ -286,7 +245,9 @@ def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 - self.sendbuf = self._build_message(vt) # Will be sent right after handle_connect + self.on_connection_send_msg = vt # Will be sent soon after connection_made + + return create_conn # Message receiving methods @@ -410,56 +371,35 @@ def sync_with_ping(self, timeout=60): self.ping_counter += 1 -# Keep our own socket map for asyncore, so that we can track disconnects -# ourselves (to workaround an issue with closing an asyncore 
socket when -# using select) -mininode_socket_map = dict() - -# One lock for synchronizing all data access between the networking thread (see +# One lock for synchronizing all data access between the network event loop (see # NetworkThread below) and the thread running the test logic. For simplicity, -# P2PConnection acquires this lock whenever delivering a message to a P2PInterface, -# and whenever adding anything to the send buffer (in send_message()). This -# lock should be acquired in the thread running the test logic to synchronize +# P2PConnection acquires this lock whenever delivering a message to a P2PInterface. +# This lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. mininode_lock = threading.RLock() + class NetworkThread(threading.Thread): + network_event_loop = None + def __init__(self): super().__init__(name="NetworkThread") + # There is only one event loop and no more than one thread must be created + assert not self.network_event_loop + + NetworkThread.network_event_loop = asyncio.new_event_loop() def run(self): - while mininode_socket_map: - # We check for whether to disconnect outside of the asyncore - # loop to workaround the behavior of asyncore when using - # select - disconnected = [] - for fd, obj in mininode_socket_map.items(): - if obj.disconnect: - disconnected.append(obj) - [obj.handle_close() for obj in disconnected] - asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) - logger.debug("Network thread closing") - -def network_thread_start(): - """Start the network thread.""" - # Only one network thread may run at a time - assert not network_thread_running() - - NetworkThread().start() - -def network_thread_running(): - """Return whether the network thread is running.""" - return any([thread.name == "NetworkThread" for thread in threading.enumerate()]) - -def network_thread_join(timeout=10): - """Wait timeout seconds for the network 
thread to terminate. - - Throw if the network thread doesn't terminate in timeout seconds.""" - network_threads = [thread for thread in threading.enumerate() if thread.name == "NetworkThread"] - assert len(network_threads) <= 1 - for thread in network_threads: - thread.join(timeout) - assert not thread.is_alive() + """Start the network thread.""" + self.network_event_loop.run_forever() + + def close(self, timeout=10): + """Close the connections and network event loop.""" + self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) + wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout) + self.network_event_loop.close() + self.join(timeout) + class P2PDataStore(P2PInterface): """A P2P data store class. diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 41ee1beabf2c5..260972240d125 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -38,6 +38,7 @@ OP_CHECKSIG, ) from .test_node import TestNode +from .mininode import NetworkThread from .util import ( MAX_NODES, PortSeed, @@ -95,6 +96,7 @@ def __init__(self): """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" self.setup_clean_chain = False self.nodes = [] + self.network_thread = None self.mocktime = 0 self.rpc_timewait = 600 # Wait for up to 600 seconds for the RPC server to respond self.supports_cli = False @@ -154,6 +156,10 @@ def main(self): self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) self._start_logging() + self.log.debug('Setting up network thread') + self.network_thread = NetworkThread() + self.network_thread.start() + success = TestStatus.FAILED try: @@ -182,6 +188,8 @@ def main(self): print("Testcase failed. Attaching python debugger. Enter ? 
for help") pdb.set_trace() + self.log.debug('Closing down network thread') + self.network_thread.close() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index a52a68bba0c9d..690878f0b90e7 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -243,7 +243,7 @@ def add_p2p_connection(self, p2p_conn, *args, **kwargs): if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' - p2p_conn.peer_connect(*args, **kwargs) + p2p_conn.peer_connect(*args, **kwargs)() self.p2ps.append(p2p_conn) return p2p_conn @@ -297,10 +297,10 @@ def __getattr__(self, command): def batch(self, requests): results = [] for request in requests: - try: - results.append(dict(result=request())) - except JSONRPCException as e: - results.append(dict(error=e)) + try: + results.append(dict(result=request())) + except JSONRPCException as e: + results.append(dict(error=e)) return results def send_cli(self, command=None, *args, **kwargs): From be9dacb89ca1ab95d29d08ba507c17074f246c9c Mon Sep 17 00:00:00 2001 From: marcoagner Date: Thu, 19 Jul 2018 12:14:05 +0100 Subject: [PATCH 03/29] tests: fixes mininode's P2PConnection sending messages on closing transport - checks if _transport.is_closing() (added in python3.4.4/python3.5.1) before attempting to send messages on P2PConnection's send_message method. 
--- test/functional/test_framework/mininode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index b871df3432402..84331cf5261b1 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -181,7 +181,7 @@ def send_message(self, message): raise IOError('Not connected') self._log_message("send", message) tmsg = self._build_message(message) - NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.write(tmsg)) + NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and not self._transport.is_closing() and self._transport.write(tmsg)) # Class utility methods From db28a539eee357d6350f5e08dc189898ce11080d Mon Sep 17 00:00:00 2001 From: Daniel Kraft Date: Mon, 23 Jul 2018 14:43:45 +0200 Subject: [PATCH 04/29] Skip is_closing() check when not available. https://github.com/bitcoin/bitcoin/pull/13715 introduced a new check for _transport.is_closing() in mininode's P2PConnection's. This function is only available from Python 3.4.4, though, while Bitcoin is supposed to support all Python 3.4 versions. In this change, we make the check conditional on is_closing() being available. If it is not, then we revert to the behaviour before the check was introduced; this means that https://github.com/bitcoin/bitcoin/issues/13579 is not fixed for old systems, but at least the tests work as they used to do before. This includes a small refactoring from a one-line lambda to an inline function, because this makes the code easier to read with more and more conditions being added. Fixes https://github.com/bitcoin/bitcoin/issues/13745. 
--- test/functional/test_framework/mininode.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index 84331cf5261b1..894f11f893682 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -181,7 +181,17 @@ def send_message(self, message): raise IOError('Not connected') self._log_message("send", message) tmsg = self._build_message(message) - NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and not self._transport.is_closing() and self._transport.write(tmsg)) + + def maybe_write(): + if not self._transport: + return + # Python <3.4.4 does not have is_closing, so we have to check for + # its existence explicitly as long as Bitcoin Core supports all + # Python 3.4 versions. + if hasattr(self._transport, 'is_closing') and self._transport.is_closing(): + return + self._transport.write(tmsg) + NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write) # Class utility methods From 8469afeee33e250710959d1438701dca519db366 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Wed, 10 Oct 2018 02:51:19 -0400 Subject: [PATCH 05/29] test: forward timeouts properly in send_blocks_and_test --- test/functional/test_framework/mininode.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index 894f11f893682..1c5c5e3ebd643 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -466,9 +466,9 @@ def send_blocks_and_test(self, blocks, node, success=True, reject_reason=None, e self.send_message(msg_block(blocks[-1])) if expect_disconnect: - self.wait_for_disconnect() + self.wait_for_disconnect(timeout=timeout) else: - self.sync_with_ping() + self.sync_with_ping(timeout=timeout) if success: wait_until(lambda: 
node.getbestblockhash() == blocks[-1].hash, timeout=timeout) From ae68c6ece66fb304e11a16e0f7835a826b3b731b Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Fri, 19 Oct 2018 13:32:49 -0400 Subject: [PATCH 06/29] tests: add utility to assert node memory usage hasn't increased Adds a utility to get resident set size memory usage for a test node and a context manager that allows assertions based upon maximum memory use increase. --- test/functional/test_framework/test_node.py | 45 +++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 690878f0b90e7..e3a72c87eaf95 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -81,6 +81,28 @@ def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mo self.p2ps = [] + def get_mem_rss(self): + """Get the memory usage (RSS) per `ps`. + + If process is stopped or `ps` is unavailable, return None. + """ + if not (self.running and self.process): + self.log.warning("Couldn't get memory usage; process isn't running.") + return None + + try: + return int(subprocess.check_output( + "ps h -o rss {}".format(self.process.pid), + shell=True, stderr=subprocess.DEVNULL).strip()) + + # Catching `Exception` broadly to avoid failing on platforms where ps + # isn't installed or doesn't work as expected, e.g. OpenBSD. + # + # We could later use something like `psutils` to work across platforms. 
+ except Exception: + self.log.exception("Unable to get memory usage") + return None + def __del__(self): # Ensure that we don't leave any bitcoind processes lying around after # the test ends @@ -225,6 +247,29 @@ def assert_debug_log(self, expected_msgs): if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: raise AssertionError('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log)) + @contextlib.contextmanager + def assert_memory_usage_stable(self, perc_increase_allowed=0.03): + """Context manager that allows the user to assert that a node's memory usage (RSS) + hasn't increased beyond some threshold percentage. + """ + before_memory_usage = self.get_mem_rss() + + yield + + after_memory_usage = self.get_mem_rss() + + if not (before_memory_usage and after_memory_usage): + self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.") + return + + perc_increase_memory_usage = 1 - (float(before_memory_usage) / after_memory_usage) + + if perc_increase_memory_usage > perc_increase_allowed: + raise AssertionError( + "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format( + perc_increase_allowed * 100, before_memory_usage, after_memory_usage, + perc_increase_memory_usage * 100)) + def node_encrypt_wallet(self, passphrase): """"Encrypts the wallet. 
From 6f21213eb05d0a079c7da6faa1122d73816be879 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Fri, 19 Oct 2018 13:34:28 -0400 Subject: [PATCH 07/29] tests: add P2PConnection.send_raw_message --- test/functional/test_framework/mininode.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index 1c5c5e3ebd643..e0d336cbcfaff 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -177,10 +177,13 @@ def send_message(self, message): This method takes a P2P payload, builds the P2P header and adds the message to the send buffer to be sent over the socket.""" + tmsg = self.build_message(message) + self._log_message("send", message) + return self.send_raw_message(tmsg) + + def send_raw_message(self, raw_message_bytes): if not self.is_connected: raise IOError('Not connected') - self._log_message("send", message) - tmsg = self._build_message(message) def maybe_write(): if not self._transport: @@ -190,12 +193,12 @@ def maybe_write(): # Python 3.4 versions. 
if hasattr(self._transport, 'is_closing') and self._transport.is_closing(): return - self._transport.write(tmsg) + self._transport.write(raw_message_bytes) NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write) # Class utility methods - def _build_message(self, message): + def build_message(self, message): """Build a serialized P2P message""" command = message.command data = message.serialize() @@ -369,9 +372,9 @@ def wait_for_verack(self, timeout=60): # Message sending helper functions - def send_and_ping(self, message): + def send_and_ping(self, message, timeout=60): self.send_message(message) - self.sync_with_ping() + self.sync_with_ping(timeout=timeout) # Sync up with the node def sync_with_ping(self, timeout=60): From 0edfce565c0779a163337502b9b91de0d29232c5 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 8 Nov 2018 17:33:15 -0500 Subject: [PATCH 08/29] test_node: get_mem_rss fixups --- test/functional/test_framework/test_node.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index e3a72c87eaf95..5d7fe5438ef53 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -84,22 +84,19 @@ def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mo def get_mem_rss(self): """Get the memory usage (RSS) per `ps`. - If process is stopped or `ps` is unavailable, return None. + Returns None if `ps` is unavailable. 
""" - if not (self.running and self.process): - self.log.warning("Couldn't get memory usage; process isn't running.") - return None + assert self.running try: return int(subprocess.check_output( - "ps h -o rss {}".format(self.process.pid), - shell=True, stderr=subprocess.DEVNULL).strip()) + ["ps", "h", "-o", "rss", "{}".format(self.process.pid)], + stderr=subprocess.DEVNULL).split()[-1]) - # Catching `Exception` broadly to avoid failing on platforms where ps - # isn't installed or doesn't work as expected, e.g. OpenBSD. + # Avoid failing on platforms where ps isn't installed. # # We could later use something like `psutils` to work across platforms. - except Exception: + except (FileNotFoundError, subprocess.SubprocessError): self.log.exception("Unable to get memory usage") return None @@ -262,7 +259,7 @@ def assert_memory_usage_stable(self, perc_increase_allowed=0.03): self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.") return - perc_increase_memory_usage = 1 - (float(before_memory_usage) / after_memory_usage) + perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1 if perc_increase_memory_usage > perc_increase_allowed: raise AssertionError( From 55a37b5c1e505b2a89a39bd7dd48097ee968e4eb Mon Sep 17 00:00:00 2001 From: furszy Date: Thu, 3 Jun 2021 12:57:05 -0300 Subject: [PATCH 09/29] net: fix missing jump line in "Oversized message from peer.." error. 
--- src/net.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net.cpp b/src/net.cpp index 27c2eff4bb9e6..2f4450535317a 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -706,7 +706,7 @@ bool CNode::ReceiveMsgBytes(const char* pch, unsigned int nBytes, bool& complete return false; if (msg.in_data && msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) { - LogPrint(BCLog::NET, "Oversized message from peer=%i, disconnecting", GetId()); + LogPrint(BCLog::NET, "Oversized message from peer=%i, disconnecting\n", GetId()); return false; } From 51ddd3d4ca60ab4ef6148955e65a5cc33e297fac Mon Sep 17 00:00:00 2001 From: furszy Date: Thu, 3 Jun 2021 13:13:14 -0300 Subject: [PATCH 10/29] Introduce p2p_invalid_messages.py test [Conjunction of several PRs coming from btc] --- test/functional/p2p_invalid_messages.py | 175 ++++++++++++++++++++ test/functional/test_framework/test_node.py | 16 +- test/functional/test_runner.py | 1 + 3 files changed, 186 insertions(+), 6 deletions(-) create mode 100755 test/functional/p2p_invalid_messages.py diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py new file mode 100755 index 0000000000000..558c9ce44c6be --- /dev/null +++ b/test/functional/p2p_invalid_messages.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test node responses to invalid network messages.""" +import struct + +from test_framework import messages +from test_framework.mininode import P2PDataStore +from test_framework.test_framework import PivxTestFramework + + +class msg_unrecognized: + """Nonsensical message. 
Modeled after similar types in test_framework.messages.""" + + command = b'badmsg' + + def __init__(self, str_data): + self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data + + def serialize(self): + return messages.ser_string(self.str_data) + + def __repr__(self): + return "{}(data={})".format(self.command, self.str_data) + + +class msg_nametoolong(msg_unrecognized): + + command = b'thisnameiswayyyyyyyyytoolong' + + +class InvalidMessagesTest(PivxTestFramework): + + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + + def run_test(self): + """ + 0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see + that it isn't an effective DoS against the node. + + 1. Send an oversized (4MB+) message and check that we're disconnected. + + 2. Send a few messages with an incorrect data size in the header, ensure the + messages are ignored. + + 3. Send an unrecognized message with a command name longer than 12 characters. + + """ + node = self.nodes[0] + self.node = node + node.add_p2p_connection(P2PDataStore()) + conn2 = node.add_p2p_connection(P2PDataStore()) + + msg_limit = 4 * 1000 * 1000 # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH + valid_data_limit = msg_limit - 5 # Account for the 4-byte length prefix + + # + # 0. + # + # Send as large a message as is valid, ensure we aren't disconnected but + # also can't exhaust resources. + # + msg_at_size = msg_unrecognized("b" * valid_data_limit) + assert len(msg_at_size.serialize()) == msg_limit + + with node.assert_memory_usage_stable(increase_allowed=0.5): + self.log.info( + "Sending a bunch of large, junk messages to test " + "memory exhaustion. May take a bit...") + + # Run a bunch of times to test for memory exhaustion. + for _ in range(80): + node.p2p.send_message(msg_at_size) + + # Check that, even though the node is being hammered by nonsense from one + # connection, it can still service other peers in a timely way. 
+ for _ in range(20): + conn2.sync_with_ping(timeout=2) + + # Peer 1, despite serving up a bunch of nonsense, should still be connected. + self.log.info("Waiting for node to drop junk messages.") + node.p2p.sync_with_ping(timeout=30) + assert node.p2p.is_connected + + # + # 1. + # + # Send an oversized message, ensure we're disconnected. + # + msg_over_size = msg_unrecognized("b" * (valid_data_limit + 1)) + assert len(msg_over_size.serialize()) == (msg_limit + 1) + + with node.assert_debug_log(["Oversized message from peer=1, disconnecting"]): + # An unknown message type (or *any* message type) over + # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect. + node.p2p.send_message(msg_over_size) + node.p2p.wait_for_disconnect(timeout=4) + + node.disconnect_p2ps() + conn = node.add_p2p_connection(P2PDataStore()) + conn.wait_for_verack() + + # + # 2. + # + # Send messages with an incorrect data size in the header. + # + actual_size = 100 + msg = msg_unrecognized("b" * actual_size) + + # TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect. + for wrong_size in (2, 77, 78, 79): + self.log.info("Sending a message with incorrect size of {}".format(wrong_size)) + + # Unmodified message should submit okay. + node.p2p.send_and_ping(msg) + + # A message lying about its data size results in a disconnect when the incorrect + # data size is less than the actual size. + # + # TODO: why does behavior change at 78 bytes? + # + node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size)) + + # For some reason unknown to me, we sometimes have to push additional data to the + # peer in order for it to realize a disconnect. + try: + node.p2p.send_message(messages.msg_ping(nonce=123123)) + except IOError: + pass + + node.p2p.wait_for_disconnect(timeout=10) + node.disconnect_p2ps() + node.add_p2p_connection(P2PDataStore()) + + # + # 3. + # + # Send a message with a too-long command name. 
+ # + node.p2p.send_message(msg_nametoolong("foobar")) + node.p2p.wait_for_disconnect(timeout=4) + + # Node is still up. + conn = node.add_p2p_connection(P2PDataStore()) + conn.sync_with_ping() + + + def _tweak_msg_data_size(self, message, wrong_size): + """ + Return a raw message based on another message but with an incorrect data size in + the message header. + """ + raw_msg = self.node.p2p.build_message(message) + + bad_size_bytes = struct.pack(" perc_increase_allowed: + if perc_increase_memory_usage > increase_allowed: raise AssertionError( "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format( - perc_increase_allowed * 100, before_memory_usage, after_memory_usage, + increase_allowed * 100, before_memory_usage, after_memory_usage, perc_increase_memory_usage * 100)) def node_encrypt_wallet(self, passphrase): diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index ded4664ebbac9..3ee741bf53cb4 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -69,6 +69,7 @@ 'mining_pos_coldStaking.py', # ~ 220 sec 'wallet_import_rescan.py', # ~ 204 sec 'p2p_invalid_block.py', # ~ 213 sec + 'p2p_invalid_messages.py', 'feature_reindex.py', # ~ 205 sec 'feature_logging.py', # ~ 195 sec 'wallet_multiwallet.py', # ~ 190 sec From 6a72f0cc12588198250c0d158d1368765cd6f8a3 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 24 Jan 2019 17:13:06 -0500 Subject: [PATCH 11/29] qa: Add tests for invalid message headers --- test/functional/p2p_invalid_messages.py | 81 ++++++++++++++++------ test/functional/test_framework/mininode.py | 6 +- 2 files changed, 62 insertions(+), 25 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 558c9ce44c6be..3b28135a2fd30 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -15,7 +15,7 @@ class msg_unrecognized: command = b'badmsg' - def __init__(self, str_data): 
+ def __init__(self, *, str_data): self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data def serialize(self): @@ -25,19 +25,14 @@ def __repr__(self): return "{}(data={})".format(self.command, self.str_data) -class msg_nametoolong(msg_unrecognized): - - command = b'thisnameiswayyyyyyyyytoolong' - - class InvalidMessagesTest(PivxTestFramework): - def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def run_test(self): """ + . Test msg header 0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see that it isn't an effective DoS against the node. @@ -45,10 +40,12 @@ def run_test(self): 2. Send a few messages with an incorrect data size in the header, ensure the messages are ignored. - - 3. Send an unrecognized message with a command name longer than 12 characters. - """ + self.test_magic_bytes() + self.test_checksum() + self.test_size() + self.test_command() + node = self.nodes[0] self.node = node node.add_p2p_connection(P2PDataStore()) @@ -63,7 +60,7 @@ def run_test(self): # Send as large a message as is valid, ensure we aren't disconnected but # also can't exhaust resources. # - msg_at_size = msg_unrecognized("b" * valid_data_limit) + msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit) assert len(msg_at_size.serialize()) == msg_limit with node.assert_memory_usage_stable(increase_allowed=0.5): @@ -90,10 +87,10 @@ def run_test(self): # # Send an oversized message, ensure we're disconnected. # - msg_over_size = msg_unrecognized("b" * (valid_data_limit + 1)) + msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1)) assert len(msg_over_size.serialize()) == (msg_limit + 1) - with node.assert_debug_log(["Oversized message from peer=1, disconnecting"]): + with node.assert_debug_log(["Oversized message from peer=5, disconnecting"]): # An unknown message type (or *any* message type) over # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect. 
node.p2p.send_message(msg_over_size) @@ -109,7 +106,7 @@ def run_test(self): # Send messages with an incorrect data size in the header. # actual_size = 100 - msg = msg_unrecognized("b" * actual_size) + msg = msg_unrecognized(str_data="b" * actual_size) # TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect. for wrong_size in (2, 77, 78, 79): @@ -136,18 +133,58 @@ def run_test(self): node.disconnect_p2ps() node.add_p2p_connection(P2PDataStore()) - # - # 3. - # - # Send a message with a too-long command name. - # - node.p2p.send_message(msg_nametoolong("foobar")) - node.p2p.wait_for_disconnect(timeout=4) - # Node is still up. conn = node.add_p2p_connection(P2PDataStore()) conn.sync_with_ping() + def test_magic_bytes(self): + conn = self.nodes[0].add_p2p_connection(P2PDataStore()) + conn.magic_bytes = b'\x00\x11\x22\x32' + with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']): + conn.send_message(messages.msg_ping(nonce=0xff)) + conn.wait_for_disconnect(timeout=1) + self.nodes[0].disconnect_p2ps() + + def test_checksum(self): + conn = self.nodes[0].add_p2p_connection(P2PDataStore()) + with self.nodes[0].assert_debug_log(['ProcessMessages(badmsg, 2 bytes): CHECKSUM ERROR expected 78df0a04 was ffffffff']): + msg = conn.build_message(msg_unrecognized(str_data="d")) + cut_len = ( + 4 + # magic + 12 + # command + 4 #len + ) + # modify checksum + msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:] + self.nodes[0].p2p.send_raw_message(msg) + conn.sync_with_ping(timeout=1) + self.nodes[0].disconnect_p2ps() + + def test_size(self): + conn = self.nodes[0].add_p2p_connection(P2PDataStore()) + with self.nodes[0].assert_debug_log(['']): + msg = conn.build_message(msg_unrecognized(str_data="d")) + cut_len = ( + 4 + # magic + 12 # command + ) + # modify len to MAX_SIZE + 1 + msg = msg[:cut_len] + struct.pack(" Date: Sat, 2 Feb 2019 17:49:28 -0500 Subject: [PATCH 12/29] test: Fix race in p2p_invalid_messages --- 
test/functional/p2p_invalid_messages.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 3b28135a2fd30..a28299c6ec4e8 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2018 The Bitcoin Core developers +# Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid network messages.""" @@ -139,6 +139,7 @@ def run_test(self): def test_magic_bytes(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) + conn._on_data = lambda: None # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes conn.magic_bytes = b'\x00\x11\x22\x32' with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']): conn.send_message(messages.msg_ping(nonce=0xff)) @@ -207,6 +208,5 @@ def _tweak_msg_data_size(self, message, wrong_size): return raw_msg_with_wrong_size - if __name__ == '__main__': InvalidMessagesTest().main() From c11f5653f446ba5445c3db32cb0f05615d53b91e Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Fri, 29 Mar 2019 13:44:41 -0400 Subject: [PATCH 13/29] qa: Make swap_magic_bytes in p2p_invalid_messages atomic --- test/functional/p2p_invalid_messages.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index a28299c6ec4e8..2aaaef3bd46ed 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -3,10 +3,12 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test node responses to invalid network messages.""" +import asyncio +import os import struct from test_framework import messages -from test_framework.mininode import P2PDataStore +from test_framework.mininode import P2PDataStore, NetworkThread from test_framework.test_framework import PivxTestFramework @@ -139,8 +141,15 @@ def run_test(self): def test_magic_bytes(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) - conn._on_data = lambda: None # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes - conn.magic_bytes = b'\x00\x11\x22\x32' + + def swap_magic_bytes(): + conn._on_data = lambda: None # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes + conn.magic_bytes = b'\x00\x11\x22\x32' + + # Call .result() to block until the atomic swap is complete, otherwise + # we might run into races later on + asyncio.run_coroutine_threadsafe(asyncio.coroutine(swap_magic_bytes)(), NetworkThread.network_event_loop).result() + with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']): conn.send_message(messages.msg_ping(nonce=0xff)) conn.wait_for_disconnect(timeout=1) From 215a6386c517b468ab33f61a7b073b2c8e741a6b Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Tue, 23 Jul 2019 15:10:36 -0400 Subject: [PATCH 14/29] test: Skip flaky p2p_invalid_messages test on macOS --- test/functional/p2p_invalid_messages.py | 28 ++++++++++++++++--------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 2aaaef3bd46ed..79ca2f97b5630 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -6,6 +6,7 @@ import asyncio import os import struct +import sys from test_framework import messages from test_framework.mininode import P2PDataStore, NetworkThread @@ -89,18 +90,25 @@ def run_test(self): # # Send an oversized message, ensure we're 
disconnected. # - msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1)) - assert len(msg_over_size.serialize()) == (msg_limit + 1) + # Under macOS this test is skipped due to an unexpected error code + # returned from the closing socket which python/asyncio does not + # yet know how to handle. + # + if sys.platform != 'darwin': + msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1)) + assert len(msg_over_size.serialize()) == (msg_limit + 1) - with node.assert_debug_log(["Oversized message from peer=5, disconnecting"]): - # An unknown message type (or *any* message type) over - # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect. - node.p2p.send_message(msg_over_size) - node.p2p.wait_for_disconnect(timeout=4) + with node.assert_debug_log(["Oversized message from peer=5, disconnecting"]): + # An unknown message type (or *any* message type) over + # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect. + node.p2p.send_message(msg_over_size) + node.p2p.wait_for_disconnect(timeout=4) - node.disconnect_p2ps() - conn = node.add_p2p_connection(P2PDataStore()) - conn.wait_for_verack() + node.disconnect_p2ps() + conn = node.add_p2p_connection(P2PDataStore()) + conn.wait_for_verack() + else: + self.log.info("Skipping test p2p_invalid_messages/1 (oversized message) under macOS") # # 2. 
From 7f71b1bd55aed0aca4095325bbb29ae27405e811 Mon Sep 17 00:00:00 2001 From: furszy Date: Thu, 3 Jun 2021 16:00:47 -0300 Subject: [PATCH 15/29] Hash P2P messages as they are received instead of at process-time Adaptation of btc@fe1dc62cef88280d2490a619beded052f313c6fc --- src/net.cpp | 8 ++++++++ src/net.h | 5 +++++ src/net_processing.cpp | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/net.cpp b/src/net.cpp index 2f4450535317a..958df9df026c9 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -797,12 +797,20 @@ int CNetMessage::readData(const char* pch, unsigned int nBytes) vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } + hasher.Write((const unsigned char*)pch, nCopy); memcpy(&vRecv[nDataPos], pch, nCopy); nDataPos += nCopy; return nCopy; } +const uint256& CNetMessage::GetMessageHash() const +{ + assert(complete()); + if (data_hash.IsNull()) + hasher.Finalize(data_hash.begin()); + return data_hash; +} // requires LOCK(cs_vSend) size_t CConnman::SocketSendData(CNode* pnode) diff --git a/src/net.h b/src/net.h index f0b6db6ec2c55..056c703e2c9d9 100644 --- a/src/net.h +++ b/src/net.h @@ -486,6 +486,9 @@ class CNodeStats class CNetMessage { +private: + mutable CHash256 hasher; + mutable uint256 data_hash; public: bool in_data; // parsing header (false) or data (true) @@ -513,6 +516,8 @@ class CNetMessage return (hdr.nMessageSize == nDataPos); } + const uint256& GetMessageHash() const; + void SetVersion(int nVersionIn) { hdrbuf.SetVersion(nVersionIn); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index f98d29056fb66..c2dae4e3ed3b2 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2031,7 +2031,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, std::atomic& interru // Checksum CDataStream& vRecv = msg.vRecv; - uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize); + uint256 hash = msg.GetMessageHash(); if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) 
!= 0) { LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__, From 33c7b19109855c3e74930c10aa5da5d37e0d7b32 Mon Sep 17 00:00:00 2001 From: furszy Date: Thu, 3 Jun 2021 16:02:00 -0300 Subject: [PATCH 16/29] net_processing: align msg checksum check properly. --- src/net_processing.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index c2dae4e3ed3b2..cea1ab73104bd 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2029,17 +2029,17 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, std::atomic& interru // Message size unsigned int nMessageSize = hdr.nMessageSize; - // Checksum - CDataStream& vRecv = msg.vRecv; - uint256 hash = msg.GetMessageHash(); - if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) - { - LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__, - SanitizeString(strCommand), nMessageSize, - HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE), - HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE)); - return fMoreWork; - } + // Checksum + CDataStream& vRecv = msg.vRecv; + uint256 hash = msg.GetMessageHash(); + if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) + { + LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__, + SanitizeString(strCommand), nMessageSize, + HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE), + HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE)); + return fMoreWork; + } // Process message bool fRet = false; From 3472a39db8c9f3010f87785f68cab8d1d7e36ba8 Mon Sep 17 00:00:00 2001 From: Elichai Turkel Date: Wed, 15 Jan 2020 14:16:27 +0200 Subject: [PATCH 17/29] Replace coroutine with async def in p2p_invalid_messages.py --- test/functional/p2p_invalid_messages.py | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 79ca2f97b5630..9808c0a52474a 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -150,13 +150,13 @@ def run_test(self): def test_magic_bytes(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) - def swap_magic_bytes(): + async def swap_magic_bytes(): conn._on_data = lambda: None # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes conn.magic_bytes = b'\x00\x11\x22\x32' # Call .result() to block until the atomic swap is complete, otherwise # we might run into races later on - asyncio.run_coroutine_threadsafe(asyncio.coroutine(swap_magic_bytes)(), NetworkThread.network_event_loop).result() + asyncio.run_coroutine_threadsafe(swap_magic_bytes(), NetworkThread.network_event_loop).result() with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']): conn.send_message(messages.msg_ping(nonce=0xff)) From c02e9a0d81d6359be95d3ab4f298bbd4a42e6fa5 Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 4 Jun 2021 11:00:59 -0300 Subject: [PATCH 18/29] [Test] move framework subversion to string --- test/functional/test_framework/messages.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 7ff905495495a..8133197533493 100644 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -28,7 +28,7 @@ MIN_VERSION_SUPPORTED = 60001 MY_VERSION = 70922 -MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" +MY_SUBVERSION = "/python-mininode-tester:0.0.3/" MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) MAX_INV_SZ = 50000 @@ -927,7 +927,7 @@ def deserialize(self, f): self.addrFrom = CAddress() self.addrFrom.deserialize(f) self.nNonce = 
struct.unpack(" Date: Fri, 4 Jun 2021 11:02:28 -0300 Subject: [PATCH 19/29] Test framework: Wait for verack by default on every new connection --- test/functional/test_framework/mininode.py | 8 ++++++++ test/functional/test_framework/test_node.py | 22 ++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index 670e0106c5d28..ce2a17f172b4b 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -327,6 +327,14 @@ def on_version(self, message): # Connection helper methods + def wait_until(self, test_function_in, *, timeout=60, check_connected=True): + def test_function(): + if check_connected: + assert self.is_connected + return test_function_in() + + wait_until(test_function, timeout=timeout, lock=mininode_lock) + def wait_for_disconnect(self, timeout=60): test_function = lambda: not self.is_connected wait_until(test_function, timeout=timeout, lock=mininode_lock) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index b5086dabe08a4..98f16a3e01700 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -24,6 +24,7 @@ wait_until, p2p_port, ) +from .messages import MY_SUBVERSION # For Python 3.4 compatibility JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) @@ -279,7 +280,7 @@ def node_encrypt_wallet(self, passphrase): self.encryptwallet(passphrase) self.wait_until_stopped() - def add_p2p_connection(self, p2p_conn, *args, **kwargs): + def add_p2p_connection(self, p2p_conn, *args, wait_for_verack=True, **kwargs): """Add a p2p connection to the node. 
This method adds the p2p connection to the self.p2ps list and also @@ -291,6 +292,25 @@ def add_p2p_connection(self, p2p_conn, *args, **kwargs): p2p_conn.peer_connect(*args, **kwargs)() self.p2ps.append(p2p_conn) + p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False) + if wait_for_verack: + # Wait for the node to send us the version and verack + p2p_conn.wait_for_verack() + # At this point we have sent our version message and received the version and verack, however the full node + # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully + # established (fSuccessfullyConnected). + # + # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the + # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a + # transaction that will be added to the mempool as soon as we return here. + # + # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds) + # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely. + p2p_conn.sync_with_ping() + # Consistency check that the PIVX Core has received our user agent string. This checks the + # node's newest peer. It could be racy if another PIVX Core node has connected since we opened + # our connection, but we don't expect that to happen. 
+ assert_equal(self.getpeerinfo()[-1]['subver'], MY_SUBVERSION) return p2p_conn From f68e22ceafceca73e6560a9effdc217b9b9d632b Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 4 Jun 2021 12:25:19 -0300 Subject: [PATCH 20/29] [Test] p2p_invalid_messages.py do not change msg.command in test_command and raise sync_with_ping timeout --- test/functional/p2p_invalid_messages.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 9808c0a52474a..217046fb86eb1 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -82,7 +82,7 @@ def run_test(self): # Peer 1, despite serving up a bunch of nonsense, should still be connected. self.log.info("Waiting for node to drop junk messages.") - node.p2p.sync_with_ping(timeout=30) + node.p2p.sync_with_ping(timeout=320) assert node.p2p.is_connected # @@ -158,7 +158,7 @@ async def swap_magic_bytes(): # we might run into races later on asyncio.run_coroutine_threadsafe(swap_magic_bytes(), NetworkThread.network_event_loop).result() - with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']): + with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping peer=1']): conn.send_message(messages.msg_ping(nonce=0xff)) conn.wait_for_disconnect(timeout=1) self.nodes[0].disconnect_p2ps() @@ -196,7 +196,6 @@ def test_command(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: ERRORS IN HEADER']): msg = msg_unrecognized(str_data="d") - msg.command = b'\xff' * 12 msg = conn.build_message(msg) # Modify command msg = msg[:7] + b'\x00' + msg[7 + 1:] From 8aaf7e14f86e6eec9d385ee1a6ed2a5b74ef1579 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Wed, 13 Nov 2019 16:25:50 -0500 Subject: [PATCH 21/29] test: Remove fragile assert_memory_usage_stable --- test/functional/p2p_invalid_messages.py | 28 
++++++------- test/functional/test_framework/test_node.py | 46 --------------------- 2 files changed, 12 insertions(+), 62 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 217046fb86eb1..264877058b626 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -4,7 +4,6 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid network messages.""" import asyncio -import os import struct import sys @@ -66,24 +65,21 @@ def run_test(self): msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit) assert len(msg_at_size.serialize()) == msg_limit - with node.assert_memory_usage_stable(increase_allowed=0.5): - self.log.info( - "Sending a bunch of large, junk messages to test " - "memory exhaustion. May take a bit...") + self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...") - # Run a bunch of times to test for memory exhaustion. - for _ in range(80): - node.p2p.send_message(msg_at_size) + # Run a bunch of times to test for memory exhaustion. + for _ in range(80): + node.p2p.send_message(msg_at_size) - # Check that, even though the node is being hammered by nonsense from one - # connection, it can still service other peers in a timely way. - for _ in range(20): - conn2.sync_with_ping(timeout=2) + # Check that, even though the node is being hammered by nonsense from one + # connection, it can still service other peers in a timely way. + for _ in range(20): + conn2.sync_with_ping(timeout=2) - # Peer 1, despite serving up a bunch of nonsense, should still be connected. - self.log.info("Waiting for node to drop junk messages.") - node.p2p.sync_with_ping(timeout=320) - assert node.p2p.is_connected + # Peer 1, despite serving up a bunch of nonsense, should still be connected. 
+ self.log.info("Waiting for node to drop junk messages.") + node.p2p.sync_with_ping(timeout=320) + assert node.p2p.is_connected # # 1. diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 98f16a3e01700..7ac0b535eb124 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -82,25 +82,6 @@ def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mo self.p2ps = [] - def get_mem_rss_kilobytes(self): - """Get the memory usage (RSS) per `ps`. - - Returns None if `ps` is unavailable. - """ - assert self.running - - try: - return int(subprocess.check_output( - ["ps", "h", "-o", "rss", "{}".format(self.process.pid)], - stderr=subprocess.DEVNULL).split()[-1]) - - # Avoid failing on platforms where ps isn't installed. - # - # We could later use something like `psutils` to work across platforms. - except (FileNotFoundError, subprocess.SubprocessError): - self.log.exception("Unable to get memory usage") - return None - def __del__(self): # Ensure that we don't leave any bitcoind processes lying around after # the test ends @@ -245,33 +226,6 @@ def assert_debug_log(self, expected_msgs): if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: raise AssertionError('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log)) - @contextlib.contextmanager - def assert_memory_usage_stable(self, *, increase_allowed=0.03): - """Context manager that allows the user to assert that a node's memory usage (RSS) - hasn't increased beyond some threshold percentage. - - Args: - increase_allowed (float): the fractional increase in memory allowed until failure; - e.g. `0.12` for up to 12% increase allowed. 
- """ - before_memory_usage = self.get_mem_rss_kilobytes() - - yield - - after_memory_usage = self.get_mem_rss_kilobytes() - - if not (before_memory_usage and after_memory_usage): - self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.") - return - - perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1 - - if perc_increase_memory_usage > increase_allowed: - raise AssertionError( - "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format( - increase_allowed * 100, before_memory_usage, after_memory_usage, - perc_increase_memory_usage * 100)) - def node_encrypt_wallet(self, passphrase): """"Encrypts the wallet. From 1404e68a573690703ad550d9e8903ed7c5453bf7 Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 4 Jun 2021 13:00:22 -0300 Subject: [PATCH 22/29] test: Add various low-level p2p tests Adaptation of btc@fa4c29bc1d2425f861845bae4f3816d9817e622a with some needed customizations for us. --- src/net_processing.cpp | 6 ++-- test/functional/p2p_invalid_messages.py | 17 ++++++++++- test/functional/p2p_invalid_tx.py | 40 +++++++++++++++++-------- 3 files changed, 46 insertions(+), 17 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index cea1ab73104bd..788124e453fd5 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1352,7 +1352,7 @@ bool static ProcessMessage(CNode* pfrom, std::string strCommand, CDataStream& vR if (vInv.size() > MAX_INV_SZ) { LOCK(cs_main); Misbehaving(pfrom->GetId(), 20); - return error("message inv size() = %u", vInv.size()); + return error("peer=%d message inv size() = %u", pfrom->GetId(), vInv.size()); } LOCK(cs_main); @@ -1402,7 +1402,7 @@ bool static ProcessMessage(CNode* pfrom, std::string strCommand, CDataStream& vR if (vInv.size() > MAX_INV_SZ) { LOCK(cs_main); Misbehaving(pfrom->GetId(), 20); - return error("message getdata size() = %u", vInv.size()); + return error("peer=%d message getdata size() = %u", pfrom->GetId(), 
vInv.size()); } if (vInv.size() != 1) @@ -2021,7 +2021,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, std::atomic& interru // Read header CMessageHeader& hdr = msg.hdr; if (!hdr.IsValid(Params().MessageStart())) { - LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id); + LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER '%s' peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id); return fMoreWork; } std::string strCommand = hdr.GetCommand(); diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 264877058b626..a5e14a39afa8d 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -8,7 +8,11 @@ import sys from test_framework import messages -from test_framework.mininode import P2PDataStore, NetworkThread +from test_framework.mininode import ( + NetworkThread, + P2PDataStore, + P2PInterface, +) from test_framework.test_framework import PivxTestFramework @@ -47,6 +51,7 @@ def run_test(self): self.test_checksum() self.test_size() self.test_command() + self.test_large_inv() node = self.nodes[0] self.node = node @@ -199,6 +204,16 @@ def test_command(self): conn.sync_with_ping(timeout=1) self.nodes[0].disconnect_p2ps() + def test_large_inv(self): # future: add Misbehaving value check, first invalid message raise it to 20, second to 40. 
+ conn = self.nodes[0].add_p2p_connection(P2PInterface()) + with self.nodes[0].assert_debug_log(['ERROR: peer=5 message inv size() = 50001']): + msg = messages.msg_inv([messages.CInv(1, 1)] * 50001) + conn.send_and_ping(msg) + with self.nodes[0].assert_debug_log(['ERROR: peer=5 message getdata size() = 50001']): + msg = messages.msg_getdata([messages.CInv(1, 1)] * 50001) + conn.send_and_ping(msg) + self.nodes[0].disconnect_p2ps() + def _tweak_msg_data_size(self, message, wrong_size): """ Return a raw message based on another message but with an incorrect data size in diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index 9705f7fb6788c..e59ba6b260a3e 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -13,11 +13,12 @@ CTxIn, CTxOut, ) -from test_framework.mininode import P2PDataStore, network_thread_join +from test_framework.mininode import P2PDataStore from test_framework.script import ( CScript, OP_NOTIF, OP_TRUE, + OP_DROP ) from test_framework.test_framework import PivxTestFramework from test_framework.util import ( @@ -47,15 +48,11 @@ def reconnect_p2p(self, **kwargs): self.nodes[0].disconnect_p2ps() self.bootstrap_p2p(**kwargs) - def new_spend_tx(self, prev_hash, prev_n, values): - """Create a CTransaction spending COutPoint(prev_hash, prev_n) - - each amount specified in the 'values' list is sent to an - anyone-can-spend script""" + def new_spend_tx(self, prev_hash, prev_n, tx_outs): + """Create a CTransaction spending COutPoint(prev_hash, prev_n) to the CTxOut-list tx_outs.""" tx = CTransaction() tx.vin.append(CTxIn(outpoint=COutPoint(prev_hash, prev_n))) - for value in values: - tx.vout.append(CTxOut(nValue=value, scriptPubKey=CScript([OP_TRUE]))) + tx.vout = tx_outs tx.calc_sha256() return tx @@ -93,21 +90,22 @@ def run_test(self): self.reconnect_p2p(num_connections=2) self.log.info('Test orphan transaction handling ... 
') + SCRIPT_PUB_KEY_OP_TRUE = CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]) # Create a root transaction that we withhold until all dependend transactions # are sent out and in the orphan cache - tx_withhold = self.new_spend_tx(block1.vtx[0].sha256, 0, [50 * COIN - 12000]) + tx_withhold = self.new_spend_tx(block1.vtx[0].sha256, 0, [CTxOut(nValue=50 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)]) # Our first orphan tx with 3 outputs to create further orphan txs - tx_orphan_1 = self.new_spend_tx(tx_withhold.sha256, 0, [10 * COIN] * 3) + tx_orphan_1 = self.new_spend_tx(tx_withhold.sha256, 0, [CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)] * 3) # A valid transaction with low fee - tx_orphan_2_no_fee = self.new_spend_tx(tx_orphan_1.sha256, 0, [10 * COIN]) + tx_orphan_2_no_fee = self.new_spend_tx(tx_orphan_1.sha256, 0, [CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)]) # A valid transaction with sufficient fee - tx_orphan_2_valid = self.new_spend_tx(tx_orphan_1.sha256, 1, [10 * COIN - 12000]) + tx_orphan_2_valid = self.new_spend_tx(tx_orphan_1.sha256, 1, [CTxOut(nValue=10 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)]) # An invalid transaction with negative fee - tx_orphan_2_invalid = self.new_spend_tx(tx_orphan_1.sha256, 2, [11 * COIN]) + tx_orphan_2_invalid = self.new_spend_tx(tx_orphan_1.sha256, 2, [CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)]) self.log.info('Send the orphans ... 
') # Send valid orphan txs from p2ps[0] @@ -139,6 +137,22 @@ def run_test(self): #wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected assert_equal(expected_mempool, set(node.getrawmempool())) + self.log.info('Test orphan pool overflow') + orphan_tx_pool = [CTransaction() for _ in range(101)] + for i in range(len(orphan_tx_pool)): + orphan_tx_pool[i].vin.append(CTxIn(outpoint=COutPoint(i, 333))) + orphan_tx_pool[i].vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)) + + with node.assert_debug_log(['mapOrphan overflow, removed 1 tx']): + node.p2p.send_txs_and_test(orphan_tx_pool, node, success=False) + + rejected_parent = CTransaction() + rejected_parent.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_2_invalid.sha256, 0))) + rejected_parent.vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)) + rejected_parent.rehash() + with node.assert_debug_log(['not keeping orphan with rejected parents {}'.format(rejected_parent.hash)]): + node.p2p.send_txs_and_test([rejected_parent], node, success=False) + if __name__ == '__main__': InvalidTxRequestTest().main() From b3391c5f7f7e527d251041cc986879a1c316a3dc Mon Sep 17 00:00:00 2001 From: Troy Giorshev Date: Thu, 4 Jun 2020 11:49:09 -0400 Subject: [PATCH 23/29] Remove two unneeded tests Test 1 is a duplicate of test_size() later in the file. Inexplicably, this test does not work on macOS, whereas test_size() does. Test 2 is problematic for two reasons. First, it always fails with an invalid checksum, which is probably not what was intended. Second, it's not defined at this layer what the behavior should be. Hypothetically, if this test was fixed so that it gave messages with valid checksums, then the message would pass successfully through the network layer and fail only in the processing layer. A priori the network layer has no idea what the size of a message "actually" is.
The "Why does behavior change at 78 bytes" is because of the following: print(len(node.p2p.build_message(msg))) # 125 => Payload size = 125 - 24 = 101 If we take 77 bytes, then there are 101 - 77 = 24 left That's exactly the size of a header So, bitcoind deserializes the header and rejects it for some other reason (Almost always an invalid size (too large)) But, if we take 78 bytes, then there are 101 - 78 = 23 left That's not enough to fill a header, so the socket stays open waiting for more data. That's why we sometimes have to push additional data in order for the peer to disconnect. Additionally, both of these tests use the "conn" variable. For fun, go look at where it's declared. (Hint: test_large_inv(). Don't we all love python's idea of scope?) --- test/functional/p2p_invalid_messages.py | 88 ------------------------- 1 file changed, 88 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index a5e14a39afa8d..0000a48b65bce 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -5,7 +5,6 @@ """Test node responses to invalid network messages.""" import asyncio import struct -import sys from test_framework import messages from test_framework.mininode import ( @@ -41,11 +40,6 @@ def run_test(self): . Test msg header 0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see that it isn't an effective DoS against the node. - - 1. Send an oversized (4MB+) message and check that we're disconnected. - - 2. Send a few messages with an incorrect data size in the header, ensure the - messages are ignored. """ self.test_magic_bytes() self.test_checksum() @@ -86,68 +80,6 @@ def run_test(self): node.p2p.sync_with_ping(timeout=320) assert node.p2p.is_connected - # - # 1. - # - # Send an oversized message, ensure we're disconnected. 
- # - # Under macOS this test is skipped due to an unexpected error code - # returned from the closing socket which python/asyncio does not - # yet know how to handle. - # - if sys.platform != 'darwin': - msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1)) - assert len(msg_over_size.serialize()) == (msg_limit + 1) - - with node.assert_debug_log(["Oversized message from peer=5, disconnecting"]): - # An unknown message type (or *any* message type) over - # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect. - node.p2p.send_message(msg_over_size) - node.p2p.wait_for_disconnect(timeout=4) - - node.disconnect_p2ps() - conn = node.add_p2p_connection(P2PDataStore()) - conn.wait_for_verack() - else: - self.log.info("Skipping test p2p_invalid_messages/1 (oversized message) under macOS") - - # - # 2. - # - # Send messages with an incorrect data size in the header. - # - actual_size = 100 - msg = msg_unrecognized(str_data="b" * actual_size) - - # TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect. - for wrong_size in (2, 77, 78, 79): - self.log.info("Sending a message with incorrect size of {}".format(wrong_size)) - - # Unmodified message should submit okay. - node.p2p.send_and_ping(msg) - - # A message lying about its data size results in a disconnect when the incorrect - # data size is less than the actual size. - # - # TODO: why does behavior change at 78 bytes? - # - node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size)) - - # For some reason unknown to me, we sometimes have to push additional data to the - # peer in order for it to realize a disconnect. - try: - node.p2p.send_message(messages.msg_ping(nonce=123123)) - except IOError: - pass - - node.p2p.wait_for_disconnect(timeout=10) - node.disconnect_p2ps() - node.add_p2p_connection(P2PDataStore()) - - # Node is still up. 
- conn = node.add_p2p_connection(P2PDataStore()) - conn.sync_with_ping() - def test_magic_bytes(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) @@ -214,26 +146,6 @@ def test_large_inv(self): # future: add Misbehaving value check, first invalid m conn.send_and_ping(msg) self.nodes[0].disconnect_p2ps() - def _tweak_msg_data_size(self, message, wrong_size): - """ - Return a raw message based on another message but with an incorrect data size in - the message header. - """ - raw_msg = self.node.p2p.build_message(message) - - bad_size_bytes = struct.pack(" Date: Thu, 4 Jun 2020 11:52:23 -0400 Subject: [PATCH 24/29] Move size limits to module-global As well, this renames those variables to match PEP8 and this clears up the comment relating to VALID_DATA_LIMIT. Admittedly, this commit is mainly to make the following ones cleaner. --- test/functional/p2p_invalid_messages.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 0000a48b65bce..4c4bc7d58eb4e 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -14,6 +14,8 @@ ) from test_framework.test_framework import PivxTestFramework +MSG_LIMIT = 4 * 1000 * 1000 # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH +VALID_DATA_LIMIT = MSG_LIMIT - 5 # Account for the 5-byte length prefix class msg_unrecognized: """Nonsensical message. Modeled after similar types in test_framework.messages.""" @@ -52,8 +54,6 @@ def run_test(self): node.add_p2p_connection(P2PDataStore()) conn2 = node.add_p2p_connection(P2PDataStore()) - msg_limit = 4 * 1000 * 1000 # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH - valid_data_limit = msg_limit - 5 # Account for the 4-byte length prefix # # 0. @@ -61,8 +61,8 @@ def run_test(self): # Send as large a message as is valid, ensure we aren't disconnected but # also can't exhaust resources. 
# - msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit) - assert len(msg_at_size.serialize()) == msg_limit + msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT) + assert len(msg_at_size.serialize()) == MSG_LIMIT self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...") From 589a7803630c1e8809e917ff8d4c846fef3f86eb Mon Sep 17 00:00:00 2001 From: Troy Giorshev Date: Thu, 4 Jun 2020 13:06:17 -0400 Subject: [PATCH 25/29] Fix "invalid message size" test This test originally made a message with an invalid stated length, and an invalid checksum. This was because only the header was changed, but the checksum stayed the same. This was fine for now because we check the header first to see if it has a valid stated size, and we disconnect if it does not, so we never end up checking for the checksum. If this behavior was to change, this test would become a problem. (Indeed I discovered this when playing around with this behavior). By instead creating a message with an oversized payload from the start, we create a message with an invalid stated length but a valid checksum, as intended. Additionally, this takes advantage of the newly module-global VALID_DATA_LIMIT as opposed to the magic 0x02000000. Yes, 4MB < 32MiB, but at the moment when receiving a message we check both, so this makes the test tighter. --- test/functional/p2p_invalid_messages.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 4c4bc7d58eb4e..c7d217807b089 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -4,7 +4,6 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages.""" import asyncio -import struct from test_framework import messages from test_framework.mininode import ( @@ -114,13 +113,9 @@ def test_checksum(self): def test_size(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) with self.nodes[0].assert_debug_log(['']): - msg = conn.build_message(msg_unrecognized(str_data="d")) - cut_len = ( - 4 + # magic - 12 # command - ) - # modify len to MAX_SIZE + 1 - msg = msg[:cut_len] + struct.pack(" Date: Thu, 4 Jun 2020 13:06:29 -0400 Subject: [PATCH 26/29] Refactor resource exhaustion test This is a simple refactor of the specified test. It is now brought in line with the rest of the tests in the module. This should make things easier to debug, as all of the tests are now grouped together at the top. --- test/functional/p2p_invalid_messages.py | 60 ++++++++++--------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index c7d217807b089..b5a703c80d784 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -37,47 +37,12 @@ def set_test_params(self): self.setup_clean_chain = True def run_test(self): - """ - . Test msg header - 0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see - that it isn't an effective DoS against the node. - """ self.test_magic_bytes() self.test_checksum() self.test_size() self.test_command() self.test_large_inv() - - node = self.nodes[0] - self.node = node - node.add_p2p_connection(P2PDataStore()) - conn2 = node.add_p2p_connection(P2PDataStore()) - - - # - # 0. - # - # Send as large a message as is valid, ensure we aren't disconnected but - # also can't exhaust resources. - # - msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT) - assert len(msg_at_size.serialize()) == MSG_LIMIT - - self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. 
May take a bit...") - - # Run a bunch of times to test for memory exhaustion. - for _ in range(80): - node.p2p.send_message(msg_at_size) - - # Check that, even though the node is being hammered by nonsense from one - # connection, it can still service other peers in a timely way. - for _ in range(20): - conn2.sync_with_ping(timeout=2) - - # Peer 1, despite serving up a bunch of nonsense, should still be connected. - self.log.info("Waiting for node to drop junk messages.") - node.p2p.sync_with_ping(timeout=320) - assert node.p2p.is_connected + self.test_resource_exhaustion() def test_magic_bytes(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) @@ -141,6 +106,29 @@ def test_large_inv(self): # future: add Misbehaving value check, first invalid m conn.send_and_ping(msg) self.nodes[0].disconnect_p2ps() + def test_resource_exhaustion(self): + conn = self.nodes[0].add_p2p_connection(P2PDataStore()) + conn2 = self.nodes[0].add_p2p_connection(P2PDataStore()) + msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT) + assert len(msg_at_size.serialize()) == MSG_LIMIT + + self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...") + + # Run a bunch of times to test for memory exhaustion. + for _ in range(80): + conn.send_message(msg_at_size) + + # Check that, even though the node is being hammered by nonsense from one + # connection, it can still service other peers in a timely way. + for _ in range(20): + conn2.sync_with_ping(timeout=2) + + # Peer 1, despite being served up a bunch of nonsense, should still be connected. 
+ self.log.info("Waiting for node to drop junk messages.") + conn.sync_with_ping(timeout=400) + assert conn.is_connected + self.nodes[0].disconnect_p2ps() + if __name__ == '__main__': InvalidMessagesTest().main() From 0aedf35c1fb0e317c666d2f00482838b2da4d41c Mon Sep 17 00:00:00 2001 From: John Newbery Date: Fri, 12 Jun 2020 22:55:02 -0400 Subject: [PATCH 27/29] [tests] Don't import asyncio to test magic bytes --- test/functional/p2p_invalid_messages.py | 28 +++++++------------------ 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index b5a703c80d784..4c61db74e0367 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -3,11 +3,8 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid network messages.""" -import asyncio - from test_framework import messages from test_framework.mininode import ( - NetworkThread, P2PDataStore, P2PInterface, ) @@ -46,17 +43,11 @@ def run_test(self): def test_magic_bytes(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) - - async def swap_magic_bytes(): - conn._on_data = lambda: None # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes - conn.magic_bytes = b'\x00\x11\x22\x32' - - # Call .result() to block until the atomic swap is complete, otherwise - # we might run into races later on - asyncio.run_coroutine_threadsafe(swap_magic_bytes(), NetworkThread.network_event_loop).result() - - with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping peer=1']): - conn.send_message(messages.msg_ping(nonce=0xff)) + with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART badmsg']): + msg = conn.build_message(msg_unrecognized(str_data="d")) + # modify magic bytes + msg = b'\xff' * 4 + msg[4:] + 
conn.send_raw_message(msg) conn.wait_for_disconnect(timeout=1) self.nodes[0].disconnect_p2ps() @@ -64,11 +55,8 @@ def test_checksum(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) with self.nodes[0].assert_debug_log(['ProcessMessages(badmsg, 2 bytes): CHECKSUM ERROR expected 78df0a04 was ffffffff']): msg = conn.build_message(msg_unrecognized(str_data="d")) - cut_len = ( - 4 + # magic - 12 + # command - 4 #len - ) + # Checksum is after start bytes (4B), message type (12B), len (4B) + cut_len = 4 + 12 + 4 # modify checksum msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:] self.nodes[0].p2p.send_raw_message(msg) @@ -79,7 +67,7 @@ def test_size(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) with self.nodes[0].assert_debug_log(['']): # Create a message with oversized payload - msg = msg_unrecognized(str_data="d"*(VALID_DATA_LIMIT + 1)) + msg = msg_unrecognized(str_data="d" * (VALID_DATA_LIMIT + 1)) msg = conn.build_message(msg) self.nodes[0].p2p.send_raw_message(msg) conn.wait_for_disconnect(timeout=1) From 15a799eab560d872041b614ae71f642fad61169e Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 4 Jun 2021 16:42:27 -0300 Subject: [PATCH 28/29] [Test] MAX_PROTOCOL_MESSAGE_LENGTH PIVXified. 
--- test/functional/p2p_invalid_messages.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 4c61db74e0367..bc513307cc4e0 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -10,7 +10,7 @@ ) from test_framework.test_framework import PivxTestFramework -MSG_LIMIT = 4 * 1000 * 1000 # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH +MSG_LIMIT = 2 * 1024 * 1024 # 2MB, per MAX_PROTOCOL_MESSAGE_LENGTH VALID_DATA_LIMIT = MSG_LIMIT - 5 # Account for the 5-byte length prefix class msg_unrecognized: From 7b04c0a4d4f83a88b1fea65588fd0e6253f1faae Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 4 Jun 2021 19:08:10 -0300 Subject: [PATCH 29/29] [Test] Clean duplicate connections creation in mining_pos_coldStaking.py --- test/functional/mining_pos_coldStaking.py | 29 +++------------------- test/functional/test_framework/mininode.py | 3 +-- 2 files changed, 5 insertions(+), 27 deletions(-) diff --git a/test/functional/mining_pos_coldStaking.py b/test/functional/mining_pos_coldStaking.py index b0ff88818085b..05aaa41924de6 100755 --- a/test/functional/mining_pos_coldStaking.py +++ b/test/functional/mining_pos_coldStaking.py @@ -8,14 +8,12 @@ from time import sleep from test_framework.messages import CTransaction, CTxIn, CTxOut, COIN, COutPoint -from test_framework.pivx_node import PivxTestNode from test_framework.script import CScript, OP_CHECKSIG from test_framework.test_framework import PivxTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, - p2p_port, bytes_to_hex_str, set_node_times, ) @@ -33,28 +31,8 @@ def set_test_params(self): self.num_nodes = 3 self.extra_args = [['-nuparams=v5_shield:201']] * self.num_nodes self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi') - - def setup_chain(self): - # Start with PoW cache: 200 blocks - 
self.log.info("Initializing test directory " + self.options.tmpdir) - self._initialize_chain() self.enable_mocktime() - def init_test(self): - title = "*** Starting %s ***" % self.__class__.__name__ - underline = "-" * len(title) - self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description) - self.DEFAULT_FEE = 0.05 - # Setup the p2p connections and start up the network thread. - self.test_nodes = [] - for i in range(self.num_nodes): - self.test_nodes.append(PivxTestNode()) - self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i)) - - # Let the test nodes get in sync - for i in range(self.num_nodes): - self.test_nodes[i].wait_for_verack() - def setColdStakingEnforcement(self, fEnable=True): sporkName = "SPORK_19_COLDSTAKING_MAINTENANCE" # update spork 19 with node[0] @@ -74,11 +52,12 @@ def isColdStakingEnforced(self): # verify from node[1] return not self.is_spork_active(1, "SPORK_19_COLDSTAKING_MAINTENANCE") - - def run_test(self): self.description = "Performs tests on the Cold Staking P2CS implementation" - self.init_test() + title = "*** Starting %s ***" % self.__class__.__name__ + underline = "-" * len(title) + self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description) + self.DEFAULT_FEE = 0.05 NUM_OF_INPUTS = 20 INPUT_VALUE = 249 diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index ce2a17f172b4b..6110074c4fcd0 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -92,8 +92,7 @@ def peer_connect(self, dstaddr, dstport, net="regtest"): loop = NetworkThread.network_event_loop conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport) - conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe) - return conn_gen + return lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe) def peer_disconnect(self): # Connection could have already been closed by other end.