  1  #!/usr/bin/env python3
  2  # Copyright (c) 2021-present The Bitcoin Core developers
  3  # Distributed under the MIT software license, see the accompanying
  4  # file COPYING or http://www.opensource.org/licenses/mit-license.php.
  5  """Test for assumeutxo, a means of quickly bootstrapping a node using
  6  a serialized version of the UTXO set at a certain height, which corresponds
  7  to a hash that has been compiled into bitcoind.
  8  
  9  The assumeutxo value generated and used here is committed to in
 10  `CRegTestParams::m_assumeutxo_data` in `src/kernel/chainparams.cpp`.
 11  """
 12  import contextlib
 13  
 14  from dataclasses import dataclass
 15  from test_framework.blocktools import (
 16          create_block,
 17  )
 18  from test_framework.compressor import (
 19      compress_amount,
 20  )
 21  from test_framework.messages import (
 22      CBlockHeader,
 23      from_hex,
 24      MAGIC_BYTES,
 25      MAX_MONEY,
 26      msg_headers,
 27      ser_varint,
 28      tx_from_hex,
 29  )
 30  from test_framework.p2p import (
 31      P2PInterface,
 32  )
 33  from test_framework.test_framework import BitcoinTestFramework
 34  from test_framework.util import (
 35      assert_approx,
 36      assert_equal,
 37      assert_not_equal,
 38      assert_raises_rpc_error,
 39      dumb_sync_blocks,
 40      ensure_for,
 41      sha256sum_file,
 42      try_rpc,
 43  )
 44  from test_framework.wallet import (
 45      getnewdestination,
 46      MiniWallet,
 47  )
 48  from test_framework.blocktools import (
 49      REGTEST_N_BITS,
 50      REGTEST_TARGET,
 51      nbits_str,
 52      target_str,
 53  )
 54  
# Height of the pregenerated, deterministic chain every node starts with.
START_HEIGHT = 199
# Height at which node0 creates the UTXO snapshot (dumptxoutset base block).
SNAPSHOT_BASE_HEIGHT = 299
# Height of node0's chain after mining 100 more blocks past the snapshot.
FINAL_HEIGHT = 399
# Expected index status once an index has fully caught up to the final tip
# (presumably compared against getindexinfo output later in the file — the
# consumer is outside this chunk; verify against run_test's tail).
COMPLETE_IDX = {'synced': True, 'best_block_height': FINAL_HEIGHT}
 59  
 60  
 61  class AssumeutxoTest(BitcoinTestFramework):
 62  
 63      def set_test_params(self):
 64          """Use the pregenerated, deterministic chain up to height 199."""
 65          self.num_nodes = 4
 66          self.rpc_timeout = 120
 67          self.extra_args = [
 68              [],
 69              ["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"],
 70              ["-persistmempool=0","-txindex=1", "-blockfilterindex=1", "-coinstatsindex=1"],
 71              []
 72          ]
 73  
    def setup_network(self):
        """Start with the nodes disconnected so that one can generate a snapshot
        including blocks the other hasn't yet seen."""
        # Deliberately omit the framework's usual connect step: the nodes must
        # stay isolated so node0 can mine past the snapshot height on its own.
        self.add_nodes(4)
        self.start_nodes(extra_args=self.extra_args)
 79  
    def test_invalid_snapshot_scenarios(self, valid_snapshot_path):
        """Corrupt a valid snapshot file in various targeted ways and assert that
        loadtxoutset rejects each mutation with the expected error.

        The byte offsets used below imply this snapshot file layout:
          [0:5)   snapshot magic (5 bytes)
          [5:7)   snapshot version (2 bytes, little-endian)
          [7:11)  network magic (4 bytes)
          [11:43) base block hash (32 bytes)
          [43:51) coin count (8 bytes, little-endian)
          [51:)   serialized UTXO data
        """
        self.log.info("Test different scenarios of loading invalid snapshot files")
        with open(valid_snapshot_path, 'rb') as f:
            valid_snapshot_contents = f.read()
        # All mutations are written to a sibling '.mod' file; node1 loads it.
        bad_snapshot_path = valid_snapshot_path + '.mod'
        node = self.nodes[1]

        def expected_error(msg):
            # Errors detected while populating the chainstate (after metadata
            # parsing succeeded) are reported with the generic -32603 code.
            assert_raises_rpc_error(-32603, f"Unable to load UTXO snapshot: Population failed: {msg}", node.loadtxoutset, bad_snapshot_path)

        self.log.info("  - snapshot file with invalid file magic")
        # Metadata parsing failures use a distinct RPC error code.
        parsing_error_code = -22
        bad_magic = 0xf00f00f000
        with open(bad_snapshot_path, 'wb') as f:
            f.write(bad_magic.to_bytes(5, "big") + valid_snapshot_contents[5:])
        assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: Invalid UTXO set snapshot magic bytes. Please check if this is indeed a snapshot file or if you are using an outdated snapshot format.", node.loadtxoutset, bad_snapshot_path)

        self.log.info("  - snapshot file with unsupported version")
        # 0 and 1 are below, 3 above, the supported version (2, per the slice width).
        for version in [0, 1, 3]:
            with open(bad_snapshot_path, 'wb') as f:
                f.write(valid_snapshot_contents[:5] + version.to_bytes(2, "little") + valid_snapshot_contents[7:])
            assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: Version of snapshot {version} does not match any of the supported versions.", node.loadtxoutset, bad_snapshot_path)

        self.log.info("  - snapshot file with mismatching network magic")
        invalid_magics = [
            # magic, name, real
            [MAGIC_BYTES["mainnet"], "main", True],
            [MAGIC_BYTES["testnet4"], "testnet4", True],
            [MAGIC_BYTES["signet"], "signet", True],
            [0x00000000.to_bytes(4, 'big'), "", False],
            [0xffffffff.to_bytes(4, 'big'), "", False],
        ]
        for [magic, name, real] in invalid_magics:
            with open(bad_snapshot_path, 'wb') as f:
                f.write(valid_snapshot_contents[:7] + magic + valid_snapshot_contents[11:])
            if real:
                # Recognized network, but not the one this node runs on.
                assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: The network of the snapshot ({name}) does not match the network of this node (regtest).", node.loadtxoutset, bad_snapshot_path)
            else:
                # Magic bytes that match no known network at all.
                assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: This snapshot has been created for an unrecognized network. This could be a custom signet, a new testnet or possibly caused by data corruption.", node.loadtxoutset, bad_snapshot_path)

        self.log.info("  - snapshot file referring to a block that is not in the assumeutxo parameters")
        prev_block_hash = self.nodes[0].getblockhash(SNAPSHOT_BASE_HEIGHT - 1)
        bogus_block_hash = "0" * 64  # Represents any unknown block hash
        for bad_block_hash in [bogus_block_hash, prev_block_hash]:
            with open(bad_snapshot_path, 'wb') as f:
                # Block hashes are serialized in reverse byte order ([::-1]).
                f.write(valid_snapshot_contents[:11] + bytes.fromhex(bad_block_hash)[::-1] + valid_snapshot_contents[43:])

            msg = f"Unable to load UTXO snapshot: assumeutxo block hash in snapshot metadata not recognized (hash: {bad_block_hash}). The following snapshot heights are available: 110, 200, 299."
            assert_raises_rpc_error(-32603, msg, node.loadtxoutset, bad_snapshot_path)

        self.log.info("  - snapshot file with wrong number of coins")
        valid_num_coins = int.from_bytes(valid_snapshot_contents[43:43 + 8], "little")
        # Off-by-one in either direction: too few coins declared leaves data
        # over; too many makes the stream look truncated.
        for off in [-1, +1]:
            with open(bad_snapshot_path, 'wb') as f:
                f.write(valid_snapshot_contents[:43])
                f.write((valid_num_coins + off).to_bytes(8, "little"))
                f.write(valid_snapshot_contents[43 + 8:])
            expected_error(msg="Bad snapshot - coins left over after deserializing 298 coins." if off == -1 else "Bad snapshot format or truncated snapshot after deserializing 299 coins.")

        self.log.info("  - snapshot file with alternated but parsable UTXO data results in different hash")
        # Each case patches `content` into the UTXO data at `offset` bytes past
        # the metadata; either the content hash changes (wrong_hash) or
        # deserialization fails with custom_message.
        cases = [
            # (content, offset, wrong_hash, custom_message)
            [b"\xff" * 32, 0, "77874d48d932a5cb7a7f770696f5224ff05746fdcf732a58270b45da0f665934", None],  # wrong outpoint hash
            [(2).to_bytes(1, "little"), 32, None, "Bad snapshot format or truncated snapshot after deserializing 1 coins."],  # wrong txid coins count
            [b"\xfd\xff\xff", 32, None, "Mismatch in coins count in snapshot metadata and actual snapshot data"],  # txid coins count exceeds coins left
            [b"\x01", 33, "9f562925721e4f97e6fde5b590dbfede51e2204a68639525062ad064545dd0ea", None],  # wrong outpoint index
            [b"\x82", 34, "161393f07f8ad71760b3910a914f677f2cb166e5bcf5354e50d46b78c0422d15", None],  # wrong coin code VARINT
            [b"\x80", 34, "e6fae191ef851554467b68acff01ca09ad0a2e48c9b3dfea46cf7d35a7fd0ad0", None],  # another wrong coin code
            [b"\x84\x58", 34, None, "Bad snapshot data after deserializing 0 coins"],  # wrong coin case with height 364 and coinbase 0
            [
                # compressed txout value + scriptpubkey
                ser_varint(compress_amount(MAX_MONEY + 1)) + ser_varint(0),
                # txid + coins per txid + vout + coin height
                32 + 1 + 1 + 2,
                None,
                "Bad snapshot data after deserializing 0 coins - bad tx out value"
            ],  # Amount exceeds MAX_MONEY
        ]

        for content, offset, wrong_hash, custom_message in cases:
            with open(bad_snapshot_path, "wb") as f:
                # Prior to offset: Snapshot magic, snapshot version, network magic, hash, coins count
                f.write(valid_snapshot_contents[:(5 + 2 + 4 + 32 + 8 + offset)])
                f.write(content)
                f.write(valid_snapshot_contents[(5 + 2 + 4 + 32 + 8 + offset + len(content)):])

            msg = custom_message if custom_message is not None else f"Bad snapshot content hash: expected d2b051ff5e8eef46520350776f4100dd710a63447a8e01d917e92e79751a63e2, got {wrong_hash}."
            expected_error(msg)
168  
169      def test_headers_not_synced(self, valid_snapshot_path):
170          for node in self.nodes[1:]:
171              msg = "Unable to load UTXO snapshot: The base block header (7cc695046fec709f8c9394b6f928f81e81fd3ac20977bb68760fa1faa7916ea2) must appear in the headers chain. Make sure all headers are syncing, and call loadtxoutset again."
172              assert_raises_rpc_error(-32603, msg, node.loadtxoutset, valid_snapshot_path)
173  
    def test_invalid_chainstate_scenarios(self):
        """Plant a bogus on-disk snapshot chainstate and verify the node refuses
        to start with a fatal init error, then restore a clean datadir."""
        self.log.info("Test different scenarios of invalid snapshot chainstate in datadir")

        self.log.info("  - snapshot chainstate referring to a block that is not in the assumeutxo parameters")
        self.stop_node(0)
        chainstate_snapshot_path = self.nodes[0].chain_path / "chainstate_snapshot"
        chainstate_snapshot_path.mkdir()
        # 32 bytes of 'z' (0x7a) as the base block hash — matches no assumeutxo
        # parameter, hence the 7a7a... hash in the expected error below.
        with open(chainstate_snapshot_path / "base_blockhash", 'wb') as f:
            f.write(b'z' * 32)

        def expected_error(log_msg="", error_msg=""):
            # Startup must both log the detail and abort with the init error.
            with self.nodes[0].assert_debug_log([log_msg]):
                self.nodes[0].assert_start_raises_init_error(expected_msg=error_msg)

        expected_error_msg = "Error: A fatal internal error occurred, see debug.log for details: Assumeutxo data not found for the given blockhash '7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a'."
        error_details = "Assumeutxo data not found for the given blockhash"
        expected_error(log_msg=error_details, error_msg=expected_error_msg)

        # resurrect node again
        (chainstate_snapshot_path / "base_blockhash").unlink()
        chainstate_snapshot_path.rmdir()
        self.start_node(0)
196  
197      def test_invalid_mempool_state(self, dump_output_path):
198          self.log.info("Test bitcoind should fail when mempool not empty.")
199          node=self.nodes[2]
200          tx = MiniWallet(node).send_self_transfer(from_node=node)
201  
202          assert tx['txid'] in node.getrawmempool()
203  
204          # Attempt to load the snapshot on Node 2 and expect it to fail
205          msg = "Unable to load UTXO snapshot: Can't activate a snapshot when mempool not empty"
206          assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)
207  
208          self.restart_node(2, extra_args=self.extra_args[2])
209  
210      def test_invalid_file_path(self):
211          self.log.info("Test bitcoind should fail when file path is invalid.")
212          node = self.nodes[0]
213          path = node.datadir_path / node.chain / "invalid" / "path"
214          assert_raises_rpc_error(-8, "Couldn't open file {} for reading.".format(path), node.loadtxoutset, path)
215  
216      def test_snapshot_with_less_work(self, dump_output_path):
217          self.log.info("Test bitcoind should fail when snapshot has less accumulated work than this node.")
218          node = self.nodes[0]
219          msg = "Unable to load UTXO snapshot: Population failed: Work does not exceed active chainstate."
220          assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)
221  
222      def test_snapshot_block_invalidated(self, dump_output_path):
223          self.log.info("Test snapshot is not loaded when base block is invalid.")
224          node = self.nodes[0]
225          # We are testing the case where the base block is invalidated itself
226          # and also the case where one of its parents is invalidated.
227          for height in [SNAPSHOT_BASE_HEIGHT, SNAPSHOT_BASE_HEIGHT - 1]:
228              block_hash = node.getblockhash(height)
229              node.invalidateblock(block_hash)
230              assert_equal(node.getblockcount(), height - 1)
231              msg = "Unable to load UTXO snapshot: The base block header (7cc695046fec709f8c9394b6f928f81e81fd3ac20977bb68760fa1faa7916ea2) is part of an invalid chain."
232              assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)
233              node.reconsiderblock(block_hash)
234  
235      def test_snapshot_in_a_divergent_chain(self, dump_output_path):
236          n0 = self.nodes[0]
237          n3 = self.nodes[3]
238          assert_equal(n0.getblockcount(), FINAL_HEIGHT)
239          assert_equal(n3.getblockcount(), START_HEIGHT)
240  
241          self.log.info("Check importing a snapshot where current chain-tip is not an ancestor of the snapshot block but has less work")
242          # Generate a divergent chain in n3 up to 298
243          self.generate(n3, nblocks=99, sync_fun=self.no_op)
244          assert_equal(n3.getblockcount(), SNAPSHOT_BASE_HEIGHT - 1)
245  
246          # Try importing the snapshot and assert its success
247          loaded = n3.loadtxoutset(dump_output_path)
248          assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
249          normal, snapshot = n3.getchainstates()["chainstates"]
250          assert_equal(normal['blocks'], START_HEIGHT + 99)
251          assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
252  
253          # Both states should have the same nBits and target
254          assert_equal(normal['bits'], nbits_str(REGTEST_N_BITS))
255          assert_equal(normal['bits'], snapshot['bits'])
256          assert_equal(normal['target'], target_str(REGTEST_TARGET))
257          assert_equal(normal['target'], snapshot['target'])
258  
259          # Now lets sync the nodes and wait for the background validation to finish
260          self.connect_nodes(0, 3)
261          self.sync_blocks(nodes=(n0, n3))
262          self.wait_until(lambda: len(n3.getchainstates()['chainstates']) == 1)
263  
264      def test_snapshot_not_on_most_work_chain(self, dump_output_path):
265          self.log.info("Test snapshot is not loaded when the node knows the headers of another chain with more work.")
266          node0 = self.nodes[0]
267          node1 = self.nodes[1]
268          # Create an alternative chain of 2 new blocks, forking off the main chain at the block before the snapshot block.
269          # This simulates a longer chain than the main chain when submitting these two block headers to node 1 because it is only aware of
270          # the main chain headers up to the snapshot height.
271          parent_block_hash = node0.getblockhash(SNAPSHOT_BASE_HEIGHT - 1)
272          block_time = node0.getblock(node0.getbestblockhash())['time'] + 1
273          fork_block1 = create_block(int(parent_block_hash, 16), height=SNAPSHOT_BASE_HEIGHT, ntime=block_time)
274          fork_block1.solve()
275          fork_block2 = create_block(fork_block1.hash_int, height=SNAPSHOT_BASE_HEIGHT + 1, ntime=block_time + 1)
276          fork_block2.solve()
277          node1.submitheader(fork_block1.serialize().hex())
278          node1.submitheader(fork_block2.serialize().hex())
279          msg = "A forked headers-chain with more work than the chain with the snapshot base block header exists. Please proceed to sync without AssumeUtxo."
280          assert_raises_rpc_error(-32603, msg, node1.loadtxoutset, dump_output_path)
281          # Cleanup: submit two more headers of the snapshot chain to node 1, so that it is the most-work chain again and loading
282          # the snapshot in future subtests succeeds
283          main_block1 = node0.getblock(node0.getblockhash(SNAPSHOT_BASE_HEIGHT + 1), 0)
284          main_block2 = node0.getblock(node0.getblockhash(SNAPSHOT_BASE_HEIGHT + 2), 0)
285          node1.submitheader(main_block1)
286          node1.submitheader(main_block2)
287  
    def test_sync_from_assumeutxo_node(self, snapshot):
        """
        This test verifies that:
        1. An IBD node can sync headers from an AssumeUTXO node at any time.
        2. IBD nodes do not request historical blocks from AssumeUTXO nodes while they are syncing the background-chain.
        3. The assumeUTXO node dynamically adjusts the network services it offers according to its state.
        4. IBD nodes can fully sync from AssumeUTXO nodes after they finish the background-chain sync.
        """
        self.log.info("Testing IBD-sync from assumeUTXO node")
        # Node2 starts clean and loads the snapshot.
        # Node3 starts clean and seeks to sync-up from snapshot_node.
        miner = self.nodes[0]
        snapshot_node = self.nodes[2]
        ibd_node = self.nodes[3]

        # Start test fresh by cleaning up node directories
        for node in (snapshot_node, ibd_node):
            self.stop_node(node.index)
            self.cleanup_folder(node.chain_path)
            self.start_node(node.index, extra_args=self.extra_args[node.index])

        # Sync-up headers chain on snapshot_node to load snapshot
        # (feed all of miner's headers, heights 1..tip, over a raw P2P connection)
        headers_provider_conn = snapshot_node.add_p2p_connection(P2PInterface())
        headers_provider_conn.wait_for_getheaders()
        msg = msg_headers()
        for block_num in range(1, miner.getblockcount()+1):
            msg.headers.append(from_hex(CBlockHeader(), miner.getblockheader(miner.getblockhash(block_num), verbose=False)))
        headers_provider_conn.send_without_ping(msg)

        # Ensure headers arrived
        default_value = {'status': ''}  # No status
        headers_tip_hash = miner.getbestblockhash()
        self.wait_until(lambda: next(filter(lambda x: x['hash'] == headers_tip_hash, snapshot_node.getchaintips()), default_value)['status'] == "headers-only")
        snapshot_node.disconnect_p2ps()

        # Load snapshot
        snapshot_node.loadtxoutset(snapshot['path'])

        # Connect nodes and verify the ibd_node can sync-up the headers-chain from the snapshot_node
        self.connect_nodes(ibd_node.index, snapshot_node.index)
        snapshot_block_hash = snapshot['base_hash']
        self.wait_until(lambda: next(filter(lambda x: x['hash'] == snapshot_block_hash, ibd_node.getchaintips()), default_value)['status'] == "headers-only")

        # Once the headers-chain is synced, the ibd_node must avoid requesting historical blocks from the snapshot_node.
        # If it does request such blocks, the snapshot_node will ignore requests it cannot fulfill, causing the ibd_node
        # to stall. This stall could last for up to 10 min, ultimately resulting in an abrupt disconnection due to the
        # ibd_node's perceived unresponsiveness.
        ensure_for(duration=3, f=lambda: len(ibd_node.getpeerinfo()[0]['inflight']) == 0)

        # Now disconnect nodes and finish background chain sync
        self.disconnect_nodes(ibd_node.index, snapshot_node.index)
        self.connect_nodes(snapshot_node.index, miner.index)
        self.sync_blocks(nodes=(miner, snapshot_node))
        # Check the base snapshot block was stored and ensure node signals full-node service support
        self.wait_until(lambda: not try_rpc(-1, "Block not available (not fully downloaded)", snapshot_node.getblock, snapshot_block_hash))
        self.wait_until(lambda: 'NETWORK' in snapshot_node.getnetworkinfo()['localservicesnames'])

        # Now that the snapshot_node is synced, verify the ibd_node can sync from it
        self.connect_nodes(snapshot_node.index, ibd_node.index)
        assert 'NETWORK' in ibd_node.getpeerinfo()[0]['servicesnames']
        self.sync_blocks(nodes=(ibd_node, snapshot_node))
349  
350      def test_sync_to_most_work_chain_after_background_validation(self):
351          """
352          After background validation completes, node should be able
353          to download and process blocks from peers without the snapshot block in their chain.
354          """
355          self.log.info("Testing sync to the most-work chain without the snapshot block after background validation")
356  
357          forking_node = self.nodes[0]
358          snapshot_node = self.nodes[2]  # Has already completed background validation
359  
360          self.log.info("Forking node switches to an alternative chain that forks one block before the snapshot block")
361          fork_point = SNAPSHOT_BASE_HEIGHT - 1
362          forking_node_old_height = forking_node.getblockcount()
363          forking_node_old_chainwork = int(forking_node.getblockchaininfo()['chainwork'], 16)
364          forking_node.invalidateblock(forking_node.getblockhash(fork_point + 1))
365  
366          self.log.info("Mine one more block than original chain to make the new chain have most work")
367          self.generate(forking_node, nblocks=(forking_node_old_height - fork_point) + 1, sync_fun=self.no_op)
368          assert int(forking_node.getblockchaininfo()['chainwork'], 16) > forking_node_old_chainwork
369  
370          self.log.info("Snapshot node should reorg to the most-work chain without the snapshot block")
371          self.sync_blocks(nodes=(snapshot_node, forking_node))
372  
373      def assert_only_network_limited_service(self, node):
374          node_services = node.getnetworkinfo()['localservicesnames']
375          assert 'NETWORK' not in node_services
376          assert 'NETWORK_LIMITED' in node_services
377  
378      @contextlib.contextmanager
379      def assert_disk_cleanup(self, node, assumeutxo_used):
380          """
381          Ensure an assumeutxo node is cleaning up the background chainstate
382          """
383          msg = []
384          if assumeutxo_used:
385              # Check that the snapshot actually existed before restart
386              assert (node.datadir_path / node.chain / "chainstate_snapshot").exists()
387              msg = ["cleaning up unneeded background chainstate"]
388  
389          with node.assert_debug_log(msg):
390              yield
391  
392          assert not (node.datadir_path / node.chain / "chainstate_snapshot").exists()
393  
394      def run_test(self):
395          """
396          Bring up two (disconnected) nodes, mine some new blocks on the first,
397          and generate a UTXO snapshot.
398  
399          Load the snapshot into the second, ensure it syncs to tip and completes
400          background validation when connected to the first.
401          """
402          n0 = self.nodes[0]
403          n1 = self.nodes[1]
404          n2 = self.nodes[2]
405          n3 = self.nodes[3]
406  
407          self.mini_wallet = MiniWallet(n0)
408  
409          # Mock time for a deterministic chain
410          for n in self.nodes:
411              n.setmocktime(n.getblockheader(n.getbestblockhash())['time'])
412  
413          # Generate a series of blocks that `n0` will have in the snapshot,
414          # but that n1 and n2 don't yet see.
415          assert_equal(n0.getblockcount(), START_HEIGHT)
416          blocks = {START_HEIGHT: Block(n0.getbestblockhash(), 1, START_HEIGHT + 1)}
417          for i in range(100):
418              block_tx = 1
419              if i % 3 == 0:
420                  self.mini_wallet.send_self_transfer(from_node=n0)
421                  block_tx += 1
422              self.generate(n0, nblocks=1, sync_fun=self.no_op)
423              height = n0.getblockcount()
424              hash = n0.getbestblockhash()
425              blocks[height] = Block(hash, block_tx, blocks[height-1].chain_tx + block_tx)
426              if i == 4:
427                  # Create a stale block that forks off the main chain before the snapshot.
428                  temp_invalid = n0.getbestblockhash()
429                  n0.invalidateblock(temp_invalid)
430                  stale_hash = self.generateblock(n0, output="raw(aaaa)", transactions=[], sync_fun=self.no_op)["hash"]
431                  n0.invalidateblock(stale_hash)
432                  n0.reconsiderblock(temp_invalid)
433                  stale_block = n0.getblock(stale_hash, 0)
434  
435  
436          self.log.info("-- Testing assumeutxo + some indexes + pruning")
437  
438          assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT)
439          assert_equal(n1.getblockcount(), START_HEIGHT)
440  
441          self.log.info(f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}")
442          dump_output = n0.dumptxoutset('utxos.dat', "latest")
443  
444          self.log.info("Test loading snapshot when the node tip is on the same block as the snapshot")
445          assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT)
446          assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
447          self.test_snapshot_with_less_work(dump_output['path'])
448  
449          self.log.info("Test loading snapshot when headers are not synced")
450          self.test_headers_not_synced(dump_output['path'])
451  
452          # In order for the snapshot to activate, we have to ferry over the new
453          # headers to n1 and n2 so that they see the header of the snapshot's
454          # base block while disconnected from n0.
455          for i in range(1, 300):
456              block = n0.getblock(n0.getblockhash(i), 0)
457              # make n1 and n2 aware of the new header, but don't give them the
458              # block.
459              n1.submitheader(block)
460              n2.submitheader(block)
461              n3.submitheader(block)
462  
463          # Ensure everyone is seeing the same headers.
464          for n in self.nodes:
465              assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT)
466  
467          assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
468  
469          def check_dump_output(output):
470              assert_equal(
471                  output['txoutset_hash'],
472                  "d2b051ff5e8eef46520350776f4100dd710a63447a8e01d917e92e79751a63e2")
473              assert_equal(output["nchaintx"], blocks[SNAPSHOT_BASE_HEIGHT].chain_tx)
474  
475          check_dump_output(dump_output)
476  
477          # Mine more blocks on top of the snapshot that n1 hasn't yet seen. This
478          # will allow us to test n1's sync-to-tip on top of a snapshot.
479          self.generate(n0, nblocks=100, sync_fun=self.no_op)
480  
481          assert_equal(n0.getblockcount(), FINAL_HEIGHT)
482          assert_equal(n1.getblockcount(), START_HEIGHT)
483  
484          assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
485  
486          self.log.info("Check that dumptxoutset works for past block heights")
487          # rollback defaults to the snapshot base height
488          dump_output2 = n0.dumptxoutset('utxos2.dat', "rollback")
489          check_dump_output(dump_output2)
490          assert_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output2['path']))
491  
492          # Rollback with specific height
493          dump_output3 = n0.dumptxoutset('utxos3.dat', rollback=SNAPSHOT_BASE_HEIGHT)
494          check_dump_output(dump_output3)
495          assert_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output3['path']))
496  
497          # Specified height that is not a snapshot height
498          prev_snap_height = SNAPSHOT_BASE_HEIGHT - 1
499          dump_output4 = n0.dumptxoutset(path='utxos4.dat', rollback=prev_snap_height)
500          assert_equal(
501              dump_output4['txoutset_hash'],
502              "45ac2777b6ca96588210e2a4f14b602b41ec37b8b9370673048cc0af434a1ec8")
503          assert_not_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output4['path']))
504  
505          # Use a hash instead of a height
506          prev_snap_hash = n0.getblockhash(prev_snap_height)
507          dump_output5 = n0.dumptxoutset('utxos5.dat', rollback=prev_snap_hash)
508          assert_equal(sha256sum_file(dump_output4['path']), sha256sum_file(dump_output5['path']))
509  
510          # Ensure n0 is back at the tip
511          assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
512  
513          self.test_snapshot_with_less_work(dump_output['path'])
514          self.test_invalid_mempool_state(dump_output['path'])
515          self.test_invalid_snapshot_scenarios(dump_output['path'])
516          self.test_invalid_chainstate_scenarios()
517          self.test_invalid_file_path()
518          self.test_snapshot_block_invalidated(dump_output['path'])
519          self.test_snapshot_not_on_most_work_chain(dump_output['path'])
520  
521          # Prune-node sanity check
522          assert 'NETWORK' not in n1.getnetworkinfo()['localservicesnames']
523  
524          self.log.info(f"Loading snapshot into second node from {dump_output['path']}")
525          # This node's tip is on an ancestor block of the snapshot, which should
526          # be the normal case
527          loaded = n1.loadtxoutset(dump_output['path'])
528          assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
529          assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
530  
531          self.log.info("Confirm that local services remain unchanged")
532          # Since n1 is a pruned node, the 'NETWORK' service flag must always be unset.
533          self.assert_only_network_limited_service(n1)
534  
535          self.log.info("Check that UTXO-querying RPCs operate on snapshot chainstate")
536          snapshot_hash = loaded['tip_hash']
537          snapshot_num_coins = loaded['coins_loaded']
538          # coinstatsindex might be not caught up yet and is not relevant for this test, so don't use it
539          utxo_info = n1.gettxoutsetinfo(use_index=False)
540          assert_equal(utxo_info['txouts'], snapshot_num_coins)
541          assert_equal(utxo_info['height'], SNAPSHOT_BASE_HEIGHT)
542          assert_equal(utxo_info['bestblock'], snapshot_hash)
543  
544          self.log.info("Check that getblockchaininfo returns information about the background validation process")
545          expected_keys = [
546              "snapshotheight",
547              "blocks",
548              "bestblockhash",
549              "mediantime",
550              "chainwork",
551              "verificationprogress"
552          ]
553          res = n1.getblockchaininfo()
554          assert "backgroundvalidation" in res.keys()
555          bv_res = res["backgroundvalidation"]
556          assert_equal(sorted(expected_keys), sorted(bv_res.keys()))
557          assert_equal(bv_res["snapshotheight"], SNAPSHOT_BASE_HEIGHT)
558          assert_equal(bv_res["blocks"], START_HEIGHT)
559          assert_equal(bv_res["bestblockhash"], n1.getblockhash(START_HEIGHT))
560          block = n1.getblockheader(bv_res["bestblockhash"])
561          assert_equal(bv_res["mediantime"], block["mediantime"])
562          assert_equal(bv_res["chainwork"], block["chainwork"])
563          background_tx_count = n1.getchaintxstats(blockhash=bv_res["bestblockhash"])["txcount"]
564          snapshot_tx_count = n1.getchaintxstats(blockhash=snapshot_hash)["txcount"]
565          expected_verification_progress = background_tx_count / snapshot_tx_count
566          assert_approx(bv_res["verificationprogress"], expected_verification_progress, vspan=0.01)
567  
568          # find coinbase output at snapshot height on node0 and scan for it on node1,
569          # where the block is not available, but the snapshot was loaded successfully
570          coinbase_tx = n0.getblock(snapshot_hash, verbosity=2)['tx'][0]
571          assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", n1.getblock, snapshot_hash)
572          coinbase_output_descriptor = coinbase_tx['vout'][0]['scriptPubKey']['desc']
573          scan_result = n1.scantxoutset('start', [coinbase_output_descriptor])
574          assert_equal(scan_result['success'], True)
575          assert_equal(scan_result['txouts'], snapshot_num_coins)
576          assert_equal(scan_result['height'], SNAPSHOT_BASE_HEIGHT)
577          assert_equal(scan_result['bestblock'], snapshot_hash)
578          scan_utxos = [(coin['txid'], coin['vout']) for coin in scan_result['unspents']]
579          assert (coinbase_tx['txid'], 0) in scan_utxos
580  
581          txout_result = n1.gettxout(coinbase_tx['txid'], 0)
582          assert_equal(txout_result['scriptPubKey']['desc'], coinbase_output_descriptor)
583  
584          def check_tx_counts(final: bool) -> None:
585              """Check nTx and nChainTx intermediate values right after loading
586              the snapshot, and final values after the snapshot is validated."""
587              for height, block in blocks.items():
588                  tx = n1.getblockheader(block.hash)["nTx"]
589                  stats = n1.getchaintxstats(nblocks=1, blockhash=block.hash)
590                  chain_tx = stats.get("txcount", None)
591                  window_tx_count = stats.get("window_tx_count", None)
592                  tx_rate = stats.get("txrate", None)
593                  window_interval = stats.get("window_interval")
594  
595                  # Intermediate nTx of the starting block should be set, but nTx of
596                  # later blocks should be 0 before they are downloaded.
597                  # The window_tx_count of one block is equal to the blocks tx count.
598                  # If the window tx count is unknown, the value is missing.
599                  # The tx_rate is calculated from window_tx_count and window_interval
600                  # when possible.
601                  if final or height == START_HEIGHT:
602                      assert_equal(tx, block.tx)
603                      assert_equal(window_tx_count, tx)
604                      if window_interval > 0:
605                          assert_approx(tx_rate, window_tx_count / window_interval, vspan=0.1)
606                      else:
607                          assert_equal(tx_rate, None)
608                  else:
609                      assert_equal(tx, 0)
610                      assert_equal(window_tx_count, None)
611  
612                  # Intermediate nChainTx of the starting block and snapshot block
613                  # should be set, but others should be None until they are downloaded.
614                  if final or height in (START_HEIGHT, SNAPSHOT_BASE_HEIGHT):
615                      assert_equal(chain_tx, block.chain_tx)
616                  else:
617                      assert_equal(chain_tx, None)
618  
619          check_tx_counts(final=False)
620  
621          normal, snapshot = n1.getchainstates()["chainstates"]
622          assert_equal(normal['blocks'], START_HEIGHT)
623          assert_equal(normal.get('snapshot_blockhash'), None)
624          assert_equal(normal['validated'], True)
625          assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
626          assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])
627          assert_equal(snapshot['validated'], False)
628  
629          assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
630  
631          self.log.info("Submit a stale block that forked off the chain before the snapshot")
632          # Normally a block like this would not be downloaded, but if it is
633          # submitted early before the background chain catches up to the fork
634          # point, it winds up in m_blocks_unlinked and triggers a corner case
635          # that previously crashed CheckBlockIndex.
636          n1.submitblock(stale_block)
637          n1.getchaintips()
638          n1.getblock(stale_hash)
639  
640          self.log.info("Submit a spending transaction for a snapshot chainstate coin to the mempool")
641          # spend the coinbase output of the first block that is not available on node1
642          spend_coin_blockhash = n1.getblockhash(START_HEIGHT + 1)
643          assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", n1.getblock, spend_coin_blockhash)
644          prev_tx = n0.getblock(spend_coin_blockhash, 3)['tx'][0]
645          prevout = {"txid": prev_tx['txid'], "vout": 0, "scriptPubKey": prev_tx['vout'][0]['scriptPubKey']['hex']}
646          privkey = n0.get_deterministic_priv_key().key
647          raw_tx = n1.createrawtransaction([prevout], {getnewdestination()[2]: 24.99})
648          signed_tx = n1.signrawtransactionwithkey(raw_tx, [privkey], [prevout])['hex']
649          signed_txid = tx_from_hex(signed_tx).txid_hex
650  
651          assert n1.gettxout(prev_tx['txid'], 0) is not None
652          n1.sendrawtransaction(signed_tx)
653          assert signed_txid in n1.getrawmempool()
654          assert not n1.gettxout(prev_tx['txid'], 0)
655  
656          PAUSE_HEIGHT = FINAL_HEIGHT - 40
657  
658          self.log.info(f"Sync node up to height {PAUSE_HEIGHT}")
659          # During snapshot tip sync, the node must remain in 'limited' mode.
660          self.assert_only_network_limited_service(n1)
661          dumb_sync_blocks(src=n0, dst=n1, height=PAUSE_HEIGHT)
662  
663          self.log.info("Checking that blocks are segmented on disk")
664          assert self.has_blockfile(n1, "00000"), "normal blockfile missing"
665          assert self.has_blockfile(n1, "00001"), "assumed blockfile missing"
666          assert not self.has_blockfile(n1, "00002"), "too many blockfiles"
667  
668          # The node must remain in 'limited' mode
669          self.assert_only_network_limited_service(n1)
670  
671          # Send snapshot block to n1 out of order. This makes the test less
672          # realistic because normally the snapshot block is one of the last
        # blocks downloaded, but it's useful to test because it triggers more
674          # corner cases in ReceivedBlockTransactions() and CheckBlockIndex()
675          # setting and testing nChainTx values, and it exposed previous bugs.
676          snapshot_hash = n0.getblockhash(SNAPSHOT_BASE_HEIGHT)
677          snapshot_block = n0.getblock(snapshot_hash, 0)
678          n1.submitblock(snapshot_block)
679  
680          self.connect_nodes(0, 1)
681  
682          self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})")
683          self.wait_until(lambda: n1.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
684          self.sync_blocks(nodes=(n0, n1))
685  
686          self.log.info("Ensuring background validation completes")
687          self.wait_until(lambda: len(n1.getchainstates()['chainstates']) == 1)
688  
689          # Since n1 is a pruned node, it will not signal NODE_NETWORK after
690          # completing the background sync.
691          self.assert_only_network_limited_service(n1)
692  
693          # Ensure indexes have synced.
694          completed_idx_state = {
695              'basic block filter index': COMPLETE_IDX,
696              'coinstatsindex': COMPLETE_IDX,
697          }
698          self.wait_until(lambda: n1.getindexinfo() == completed_idx_state)
699  
700          self.log.info("Re-check nTx and nChainTx values")
701          check_tx_counts(final=True)
702  
703          for i in (0, 1):
704              n = self.nodes[i]
705              self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes")
706              with self.assert_disk_cleanup(n, i == 1):
707                  self.restart_node(i, extra_args=self.extra_args[i])
708  
709              assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
710  
711              chainstate, = n.getchainstates()['chainstates']
712              assert_equal(chainstate['blocks'], FINAL_HEIGHT)
713  
714              if i != 0:
715                  # Ensure indexes have synced for the assumeutxo node
716                  self.wait_until(lambda: n.getindexinfo() == completed_idx_state)
717  
718  
719          # Node 2: all indexes + reindex
720          # -----------------------------
721  
722          self.log.info("-- Testing all indexes + reindex")
723          assert_equal(n2.getblockcount(), START_HEIGHT)
724          assert 'NETWORK' in n2.getnetworkinfo()['localservicesnames']  # sanity check
725  
726          self.log.info(f"Loading snapshot into third node from {dump_output['path']}")
727          loaded = n2.loadtxoutset(dump_output['path'])
728          assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
729          assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
730  
731          # Even though n2 is a full node, it will unset the 'NETWORK' service flag during snapshot loading.
        # This indicates to other peers that the node will temporarily not provide historical blocks.
733          self.log.info("Check node2 updated the local services during snapshot load")
734          self.assert_only_network_limited_service(n2)
735  
736          for reindex_arg in ['-reindex=1', '-reindex-chainstate=1']:
737              self.log.info(f"Check that restarting with {reindex_arg} will delete the snapshot chainstate")
738              self.restart_node(2, extra_args=[reindex_arg, *self.extra_args[2]])
739              assert_equal(1, len(n2.getchainstates()["chainstates"]))
740              for i in range(1, 300):
741                  block = n0.getblock(n0.getblockhash(i), 0)
742                  n2.submitheader(block)
743              loaded = n2.loadtxoutset(dump_output['path'])
744              assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
745              assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
746  
747          normal, snapshot = n2.getchainstates()['chainstates']
748          assert_equal(normal['blocks'], START_HEIGHT)
749          assert_equal(normal.get('snapshot_blockhash'), None)
750          assert_equal(normal['validated'], True)
751          assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
752          assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])
753          assert_equal(snapshot['validated'], False)
754  
755          self.log.info("Check that loading the snapshot again will fail because there is already an active snapshot.")
756          msg = "Unable to load UTXO snapshot: Can't activate a snapshot-based chainstate more than once"
757          assert_raises_rpc_error(-32603, msg, n2.loadtxoutset, dump_output['path'])
758  
759          # Upon restart, the node must stay in 'limited' mode until the background
760          # chain sync completes.
761          self.restart_node(2, extra_args=self.extra_args[2])
762          self.assert_only_network_limited_service(n2)
763  
764          self.connect_nodes(0, 2)
765          self.wait_until(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
766          self.sync_blocks(nodes=(n0, n2))
767  
768          self.log.info("Ensuring background validation completes")
769          self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1)
770  
771          # Once background chain sync completes, the full node must start offering historical blocks again.
772          self.wait_until(lambda: {'NETWORK', 'NETWORK_LIMITED'}.issubset(n2.getnetworkinfo()['localservicesnames']))
773  
774          completed_idx_state = {
775              'basic block filter index': COMPLETE_IDX,
776              'coinstatsindex': COMPLETE_IDX,
777              'txindex': COMPLETE_IDX,
778          }
779          self.wait_until(lambda: n2.getindexinfo() == completed_idx_state)
780  
781          for i in (0, 2):
782              n = self.nodes[i]
783              self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes")
784              with self.assert_disk_cleanup(n, i == 2):
785                  self.restart_node(i, extra_args=self.extra_args[i])
786  
787              assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
788  
789              chainstate, = n.getchainstates()['chainstates']
790              assert_equal(chainstate['blocks'], FINAL_HEIGHT)
791  
792              if i != 0:
793                  # Ensure indexes have synced for the assumeutxo node
794                  self.wait_until(lambda: n.getindexinfo() == completed_idx_state)
795  
796          self.log.info("Test -reindex-chainstate of an assumeutxo-synced node")
797          self.restart_node(2, extra_args=[
798              '-reindex-chainstate=1', *self.extra_args[2]])
799          assert_equal(n2.getblockchaininfo()["blocks"], FINAL_HEIGHT)
800          self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)
801  
802          self.log.info("Test -reindex of an assumeutxo-synced node")
803          self.restart_node(2, extra_args=['-reindex=1', *self.extra_args[2]])
804          self.connect_nodes(0, 2)
805          self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)
806  
807          self.test_snapshot_in_a_divergent_chain(dump_output['path'])
808  
809          # The following test cleans node2 and node3 chain directories.
810          self.test_sync_from_assumeutxo_node(snapshot=dump_output)
811  
812          self.test_sync_to_most_work_chain_after_background_validation()
813  
@dataclass
class Block:
    """Expected per-block index values used by check_tx_counts()."""
    hash: str  # block hash (hex string)
    tx: int  # expected nTx: number of transactions in this block
    chain_tx: int  # expected nChainTx: cumulative tx count up to and including this block
819  
if __name__ == '__main__':
    # Standard test-framework entry point: run this test directly.
    AssumeutxoTest(__file__).main()