  1  #!/usr/bin/env python3
  2  # Copyright (c) 2015-present The Bitcoin Core developers
  3  # Distributed under the MIT software license, see the accompanying
  4  # file COPYING or http://www.opensource.org/licenses/mit-license.php.
  5  """Test behavior of -maxuploadtarget.
  6  
  7  * Verify that getdata requests for old blocks (>1week) are dropped
  8  if uploadtarget has been reached.
  9  * Verify that getdata requests for recent blocks are respected even
 10  if uploadtarget has been reached.
 11  * Verify that mempool requests lead to a disconnect if uploadtarget has been reached.
 12  * Verify that the upload counters are reset after 24 hours.
 13  """
 14  from collections import defaultdict
 15  import time
 16  
 17  from test_framework.messages import (
 18      CInv,
 19      MSG_BLOCK,
 20      msg_getdata,
 21      msg_mempool,
 22  )
 23  from test_framework.p2p import P2PInterface
 24  from test_framework.test_framework import BitcoinTestFramework
 25  from test_framework.util import (
 26      assert_equal,
 27      mine_large_block,
 28  )
 29  from test_framework.wallet import MiniWallet
 30  
 31  
 32  UPLOAD_TARGET_MB = 800
 33  
 34  
 35  class TestP2PConn(P2PInterface):
 36      def __init__(self):
 37          super().__init__()
 38          self.block_receive_map = defaultdict(int)
 39  
 40      def on_inv(self, message):
 41          pass
 42  
 43      def on_block(self, message):
 44          self.block_receive_map[message.block.hash_int] += 1
 45  
class MaxUploadTest(BitcoinTestFramework):

    def set_test_params(self):
        # Single node on a fresh chain, started with the upload target under test.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[
            f"-maxuploadtarget={UPLOAD_TARGET_MB}M",
        ]]

    def assert_uploadtarget_state(self, *, target_reached, serve_historical_blocks):
        """Verify the node's current upload target state via the `getnettotals` RPC call."""
        uploadtarget = self.nodes[0].getnettotals()["uploadtarget"]
        assert_equal(uploadtarget["target_reached"], target_reached)
        assert_equal(uploadtarget["serve_historical_blocks"], serve_historical_blocks)

    def run_test(self):
        """Exercise -maxuploadtarget: historical-block serving cutoff, new-block
        serving past the limit, mempool-request disconnect, counter reset after
        24h, the download permission exemption, and option parsing errors."""
        # Initially, neither historical blocks serving limit nor total limit are reached
        self.assert_uploadtarget_state(target_reached=False, serve_historical_blocks=True)

        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2*60*60*24*7)  # two weeks in the past
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.wallet = MiniWallet(self.nodes[0])
        self.generate(self.wallet, 130)

        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []

        for _ in range(3):
            # Don't use v2transport in this test (too slow with the unoptimized python ChaCha20 implementation)
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn(), supports_v2_p2p=False))

        # Now mine a big block
        mine_large_block(self, self.wallet, self.nodes[0])

        # Store the hash; we'll request this later. Keep the hex form around
        # just long enough to query the block size, then convert to int for CInv.
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self, self.wallet, self.nodes[0])

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # p2p_conns[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))

        # Compute how many old-block downloads should succeed before the
        # historical serving limit kicks in: the daily target minus the
        # buffer reserved for relaying new blocks (144 blocks * 4MB).
        max_bytes_per_day = UPLOAD_TARGET_MB * 1024 *1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            p2p_conns[0].send_and_ping(getdata_request)
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        with self.nodes[0].assert_debug_log(expected_msgs=["historical block serving limit reached, disconnecting peer=0"]):
            for _ in range(3):
                p2p_conns[0].send_without_ping(getdata_request)
            p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info("Peer 0 disconnected after downloading old block too many times")

        # Historical blocks serving limit is reached by now, but total limit still isn't
        self.assert_uploadtarget_state(target_reached=False, serve_historical_blocks=False)

        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 800 times
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(800):
            p2p_conns[1].send_and_ping(getdata_request)
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)

        # Both historical blocks serving limit and total limit are reached
        self.assert_uploadtarget_state(target_reached=True, serve_historical_blocks=False)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if p2p_conns[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        with self.nodes[0].assert_debug_log(expected_msgs=["historical block serving limit reached, disconnecting peer=1"]):
            p2p_conns[1].send_without_ping(getdata_request)
            p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_and_ping(getdata_request)
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
        # After the reset the node is back to its initial serving state.
        self.assert_uploadtarget_state(target_reached=False, serve_historical_blocks=True)

        self.log.info("Peer 2 able to download old block")

        self.nodes[0].disconnect_p2ps()

        self.log.info("Restarting node 0 with download permission, bloom filter support and 1MB maxuploadtarget")
        self.restart_node(0, ["-whitelist=download@127.0.0.1", "-peerbloomfilters", "-maxuploadtarget=1"])
        # Total limit isn't reached after restart, but 1 MB is too small to serve historical blocks
        self.assert_uploadtarget_state(target_reached=False, serve_historical_blocks=False)

        # Reconnect to self.nodes[0]
        peer = self.nodes[0].add_p2p_connection(TestP2PConn(), supports_v2_p2p=False)

        # Sending mempool message shouldn't disconnect peer, as total limit isn't reached yet
        peer.send_and_ping(msg_mempool())

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(20):
            peer.send_and_ping(getdata_request)
            assert_equal(peer.block_receive_map[big_new_block], i+1)

        # Total limit is exceeded
        self.assert_uploadtarget_state(target_reached=True, serve_historical_blocks=False)

        # The download permission exempts the peer from the historical-block
        # serving cutoff, so requesting the old block must NOT disconnect it.
        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        peer.send_and_ping(getdata_request)

        self.log.info("Peer still connected after trying to download old block (download permission)")
        peer_info = self.nodes[0].getpeerinfo()
        assert_equal(len(peer_info), 1)  # node is still connected
        assert_equal(peer_info[0]['permissions'], ['download'])

        self.log.info("Peer gets disconnected for a mempool request after limit is reached")
        with self.nodes[0].assert_debug_log(expected_msgs=["mempool request with bandwidth limit reached, disconnecting peer=0"]):
            peer.send_without_ping(msg_mempool())
            peer.wait_for_disconnect()

        self.log.info("Test passing an unparsable value to -maxuploadtarget throws an error")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(extra_args=["-maxuploadtarget=abc"], expected_msg="Error: Unable to parse -maxuploadtarget: 'abc'")
# Standard functional-test entry point: run the test when executed directly.
if __name__ == '__main__':
    MaxUploadTest(__file__).main()