# test/functional/mempool_updatefromblock.py
  1  #!/usr/bin/env python3
  2  # Copyright (c) 2020-present The Bitcoin Core developers
  3  # Distributed under the MIT software license, see the accompanying
  4  # file COPYING or http://www.opensource.org/licenses/mit-license.php.
  5  """Test mempool descendants/ancestors information update.
  6  
  7  Test mempool update of transaction descendants/ancestors information (count, size)
  8  when transactions have been re-added from a disconnected block to the mempool.
  9  """
 10  from decimal import Decimal
 11  from math import ceil
 12  import time
 13  
 14  from test_framework.blocktools import create_empty_fork
 15  from test_framework.test_framework import BitcoinTestFramework
 16  from test_framework.util import assert_equal, assert_greater_than_or_equal, assert_raises_rpc_error
 17  from test_framework.wallet import MiniWallet
 18  from test_framework.mempool_util import DEFAULT_CLUSTER_LIMIT
 19  
 20  MAX_DISCONNECTED_TX_POOL_BYTES = 20_000_000
 21  
 22  CUSTOM_ANCESTOR_COUNT = 100
 23  CUSTOM_DESCENDANT_COUNT = CUSTOM_ANCESTOR_COUNT
 24  
 25  class MempoolUpdateFromBlockTest(BitcoinTestFramework):
 26      def set_test_params(self):
 27          self.num_nodes = 1
 28          self.extra_args = [['-limitclustersize=1000']]
 29  
 30      def trigger_reorg(self, fork_blocks):
 31          """Trigger reorg of the fork blocks."""
 32          for block in fork_blocks:
 33              self.nodes[0].submitblock(block.serialize().hex())
 34          assert_equal(self.nodes[0].getbestblockhash(), fork_blocks[-1].hash_hex)
 35  
 36      def transaction_graph_test(self, size, *, n_tx_to_mine, fee=100_000):
 37          """Create an acyclic tournament (a type of directed graph) of transactions and use it for testing.
 38  
 39          Keyword arguments:
 40          size -- the order N of the tournament which is equal to the number of the created transactions
 41          n_tx_to_mine -- the number of transactions that should be mined into a block
 42  
 43          If all of the N created transactions tx[0]..tx[N-1] reside in the mempool,
 44          the following holds:
 45              the tx[K] transaction:
 46              - has N-K descendants (including this one), and
 47              - has K+1 ancestors (including this one)
 48  
 49          More details: https://en.wikipedia.org/wiki/Tournament_(graph_theory)
 50          """
 51          wallet = MiniWallet(self.nodes[0])
 52  
 53          # Prep for fork with empty blocks to not use invalidateblock directly
 54          # for reorg case. The rpc has different codepath
 55          fork_blocks = create_empty_fork(self.nodes[0], fork_length=7)
 56  
 57          tx_id = []
 58          tx_size = []
 59          self.log.info('Creating {} transactions...'.format(size))
 60          for i in range(0, size):
 61              self.log.debug('Preparing transaction #{}...'.format(i))
 62              # Prepare inputs.
 63              if i == 0:
 64                  inputs = [wallet.get_utxo()]  # let MiniWallet provide a start UTXO
 65              else:
 66                  inputs = []
 67                  for j, tx in enumerate(tx_id[0:i]):
 68                      # Transaction tx[K] is a child of each of previous transactions tx[0]..tx[K-1] at their output K-1.
 69                      vout = i - j - 1
 70                      inputs.append(wallet.get_utxo(txid=tx_id[j], vout=vout))
 71  
 72              # Prepare outputs.
 73              tx_count = i + 1
 74              if tx_count < size:
 75                  # Transaction tx[K] is an ancestor of each of subsequent transactions tx[K+1]..tx[N-1].
 76                  n_outputs = size - tx_count
 77              else:
 78                  n_outputs = 1
 79  
 80              # Create a new transaction.
 81              new_tx = wallet.send_self_transfer_multi(
 82                  from_node=self.nodes[0],
 83                  utxos_to_spend=inputs,
 84                  num_outputs=n_outputs,
 85                  fee_per_output=ceil(fee / n_outputs)
 86              )
 87              tx_id.append(new_tx['txid'])
 88              tx_size.append(new_tx['tx'].get_vsize())
 89  
 90              if tx_count in n_tx_to_mine:
 91                  # The created transactions are mined into blocks by batches.
 92                  self.log.info('The batch of {} transactions has been accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
 93                  self.generate(self.nodes[0], 1)[0]
 94                  assert_equal(len(self.nodes[0].getrawmempool()), 0)
 95                  self.log.info('All of the transactions from the current batch have been mined into a block.')
 96              elif tx_count == size:
 97                  # At the end the old fork is submitted to cause reorg, and all of the created
 98                  # transactions should be re-added from disconnected blocks to the mempool.
 99                  self.log.info('The last batch of {} transactions has been accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
100                  start = time.time()
101                  # Trigger reorg
102                  for block in fork_blocks:
103                      self.nodes[0].submitblock(block.serialize().hex())
104                  end = time.time()
105                  assert_equal(len(self.nodes[0].getrawmempool()), size)
106                  self.log.info('All of the recently mined transactions have been re-added into the mempool in {} seconds.'.format(end - start))
107  
108          self.log.info('Checking descendants/ancestors properties of all of the in-mempool transactions...')
109          for k, tx in enumerate(tx_id):
110              self.log.debug('Check transaction #{}.'.format(k))
111              entry = self.nodes[0].getmempoolentry(tx)
112              assert_equal(entry['descendantcount'], size - k)
113              assert_equal(entry['descendantsize'], sum(tx_size[k:size]))
114              assert_equal(entry['ancestorcount'], k + 1)
115              assert_equal(entry['ancestorsize'], sum(tx_size[0:(k + 1)]))
116  
117          self.generate(self.nodes[0], 1)
118          assert_equal(self.nodes[0].getrawmempool(), [])
119          wallet.rescan_utxos()
120  
121      def test_max_disconnect_pool_bytes(self):
122          self.log.info('Creating independent transactions to test MAX_DISCONNECTED_TX_POOL_BYTES limit during reorg')
123  
124          # Generate coins for the hundreds of transactions we will make
125          parent_target_vsize = 100_000
126          wallet = MiniWallet(self.nodes[0])
127          self.generate(wallet, (MAX_DISCONNECTED_TX_POOL_BYTES // parent_target_vsize) + 100)
128  
129          assert_equal(self.nodes[0].getrawmempool(), [])
130  
131          # Set up empty fork blocks ahead of time, needs to be longer than full fork made later
132          fork_blocks = create_empty_fork(self.nodes[0], fork_length=60)
133  
134          large_std_txs = []
135          # Add children to ensure they're recursively removed if disconnectpool trimming of parent occurs
136          small_child_txs = []
137          aggregate_serialized_size = 0
138          while aggregate_serialized_size < MAX_DISCONNECTED_TX_POOL_BYTES:
139              # Mine parents in FIFO order via fee ordering
140              large_std_txs.append(wallet.create_self_transfer(target_vsize=parent_target_vsize, fee=Decimal("0.00400000") - (Decimal("0.00001000") * len(large_std_txs))))
141              small_child_txs.append(wallet.create_self_transfer(utxo_to_spend=large_std_txs[-1]['new_utxo']))
142              # Slight underestimate of dynamic cost, so we'll be over during reorg
143              aggregate_serialized_size += len(large_std_txs[-1]["tx"].serialize())
144  
145          for large_std_tx in large_std_txs:
146              self.nodes[0].sendrawtransaction(large_std_tx["hex"])
147  
148          assert_equal(self.nodes[0].getmempoolinfo()["size"], len(large_std_txs))
149  
150          # Mine non-empty chain that will be reorged shortly
151          self.generate(self.nodes[0], len(fork_blocks) - 1)
152          assert_equal(self.nodes[0].getrawmempool(), [])
153  
154          # Stick children in mempool, evicted with parent potentially
155          for small_child_tx in small_child_txs:
156              self.nodes[0].sendrawtransaction(small_child_tx["hex"])
157  
158          assert_equal(self.nodes[0].getmempoolinfo()["size"], len(small_child_txs))
159  
160          # Reorg back before the first block in the series, should drop something
161          # but not all, and any time parent is dropped, child is also removed
162          self.trigger_reorg(fork_blocks=fork_blocks)
163          mempool = self.nodes[0].getrawmempool()
164          # At least one parent must be dropped, but more may be dropped,
165          # depending on the dynamic cost overhead.
166          expected_parent_count = len(large_std_txs) - 1
167          assert_greater_than_or_equal(expected_parent_count * 2, len(mempool))
168          expected_parent_count = len(mempool) // 2
169  
170          parent_presence = [tx["txid"] in mempool for tx in large_std_txs]
171  
172          # The txns at the end of the list, or most recently confirmed, should have been trimmed
173          assert_equal(parent_presence, [tx["txid"] in mempool for tx in small_child_txs])
174          assert_equal(parent_presence, [True] * expected_parent_count + [False] * (len(large_std_txs) - expected_parent_count))
175  
176      def test_chainlimits_exceeded(self):
177          self.log.info('Check that too long chains on reorg are handled')
178  
179          wallet = MiniWallet(self.nodes[0])
180          self.generate(wallet, 101)
181  
182          assert_equal(self.nodes[0].getrawmempool(), [])
183  
184          # Prep fork
185          fork_blocks = create_empty_fork(self.nodes[0])
186  
187          # Two higher than descendant count
188          chain = wallet.create_self_transfer_chain(chain_length=DEFAULT_CLUSTER_LIMIT + 2)
189          for tx in chain[:-2]:
190              self.nodes[0].sendrawtransaction(tx["hex"])
191  
192          assert_raises_rpc_error(-26, "too-large-cluster", self.nodes[0].sendrawtransaction, chain[-2]["hex"])
193  
194          # Mine a block with all but last transaction, non-standardly long chain
195          self.generateblock(self.nodes[0], output="raw(42)", transactions=[tx["hex"] for tx in chain[:-1]])
196          assert_equal(self.nodes[0].getrawmempool(), [])
197  
198          # Last tx fits now
199          self.nodes[0].sendrawtransaction(chain[-1]["hex"])
200  
201          # Finally, reorg to empty chain to kick everything back into mempool
202          # at normal chain limits
203          for block in fork_blocks:
204              self.nodes[0].submitblock(block.serialize().hex())
205          mempool = self.nodes[0].getrawmempool()
206          assert_equal(set(mempool), set([tx["txid"] for tx in chain[:-2]]))
207  
208      def run_test(self):
209          # Mine in batches of 25 to test multi-block reorg under chain limits
210          self.transaction_graph_test(size=DEFAULT_CLUSTER_LIMIT, n_tx_to_mine=[25, 50, 75])
211  
212          self.test_max_disconnect_pool_bytes()
213  
214          self.test_chainlimits_exceeded()
215  
if __name__ == '__main__':
    # Run the test directly (the framework parses CLI args and manages nodes).
    MempoolUpdateFromBlockTest(__file__).main()