# test/functional/test_framework/test_framework.py
   1  #!/usr/bin/env python3
   2  # Copyright (c) 2014-present The Bitcoin Core developers
   3  # Distributed under the MIT software license, see the accompanying
   4  # file COPYING or http://www.opensource.org/licenses/mit-license.php.
   5  """Base class for RPC testing."""
   6  
   7  import configparser
   8  from enum import Enum
   9  import argparse
  10  from datetime import datetime, timezone
  11  import logging
  12  import os
  13  from pathlib import Path
  14  import platform
  15  import pdb
  16  import random
  17  import re
  18  import shutil
  19  import subprocess
  20  import sys
  21  import tempfile
  22  import time
  23  
  24  from .address import create_deterministic_address_bcrt1_p2tr_op_true
  25  from .authproxy import JSONRPCException
  26  from . import coverage
  27  from .p2p import NetworkThread
  28  from .test_node import TestNode
  29  from .util import (
  30      Binaries,
  31      MAX_NODES,
  32      PortSeed,
  33      assert_equal,
  34      check_json_precision,
  35      export_env_build_path,
  36      find_vout_for_address,
  37      get_binary_paths,
  38      get_datadir_path,
  39      initialize_datadir,
  40      p2p_port,
  41      wait_until_helper_internal,
  42      wallet_importprivkey,
  43  )
  44  
  45  
  46  class TestStatus(Enum):
  47      PASSED = 1
  48      FAILED = 2
  49      SKIPPED = 3
  50  
# Process exit codes returned by shutdown(): 0 on success, 1 on failure,
# and 77 when the test was skipped (see TestStatus handling in shutdown()).
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77

# Prefix for the auto-created temporary datadir used when --tmpdir is not given.
TMPDIR_PREFIX = "bitcoin_func_test_"
  56  
  57  
  58  class SkipTest(Exception):
  59      """This exception is raised to skip a test"""
  60  
  61      def __init__(self, message):
  62          self.message = message
  63  
  64  
  65  class BitcoinTestMetaClass(type):
  66      """Metaclass for BitcoinTestFramework.
  67  
  68      Ensures that any attempt to register a subclass of `BitcoinTestFramework`
  69      adheres to a standard whereby the subclass overrides `set_test_params` and
  70      `run_test` but DOES NOT override either `__init__` or `main`. If any of
  71      those standards are violated, a ``TypeError`` is raised."""
  72  
  73      def __new__(cls, clsname, bases, dct):
  74          if not clsname == 'BitcoinTestFramework':
  75              if not ('run_test' in dct and 'set_test_params' in dct):
  76                  raise TypeError("BitcoinTestFramework subclasses must override "
  77                                  "'run_test' and 'set_test_params'")
  78              if '__init__' in dct or 'main' in dct:
  79                  raise TypeError("BitcoinTestFramework subclasses may not override "
  80                                  "'__init__' or 'main'")
  81  
  82          return super().__new__(cls, clsname, bases, dct)
  83  
  84  
  85  class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
  86      """Base class for a bitcoin test script.
  87  
  88      Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
  89  
  90      Individual tests can also override the following methods to customize the test setup:
  91  
  92      - add_options()
  93      - setup_chain()
  94      - setup_network()
  95      - setup_nodes()
  96  
  97      The __init__() and main() methods should not be overridden.
  98  
  99      This class also contains various public and private helper methods."""
 100  
    def __init__(self, test_file) -> None:
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        self.chain: str = 'regtest'
        self.setup_clean_chain: bool = False
        # When True, add_nodes() appends -whitelist=noban,in,out@127.0.0.1 to
        # every node's args to speed up tx relay / mempool sync.
        self.noban_tx_relay: bool = False
        self.nodes: list[TestNode] = []
        self.extra_args = None
        self.extra_init = None
        self.network_thread = None
        self.rpc_timeout = 60  # Wait for up to 60 seconds for the RPC server to respond
        self.supports_cli = True
        self.bind_to_localhost_only = True
        # Parse command-line options first so set_test_params() below can read
        # self.options.
        self.parse_args(test_file)
        self.default_wallet_name = "default_wallet"
        self.wallet_data_filename = "wallet.dat"
        # Optional list of wallet names that can be set in set_test_params to
        # create and import keys to. If unset, default is len(nodes) *
        # [default_wallet_name]. If wallet names are None, wallet creation is
        # skipped. If list is truncated, wallet creation is skipped and keys
        # are not imported.
        self.wallet_names = None
        # By default the wallet is not required. Set to true by skip_if_no_wallet().
        # Can also be set to None to indicate that the wallet will be used if available.
        # When False or None, we ignore wallet_names in setup_nodes().
        self.uses_wallet = False
        # Disable ThreadOpenConnections by default, so that adding entries to
        # addrman will not result in automatic connections to them.
        self.disable_autoconnect = True
        self.set_test_params()
        assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes
        self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) # optionally, increase timeout by a factor
 132  
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""

        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"

        try:
            self.setup()
            if self.options.test_methods:
                self.run_test_methods()
            else:
                self.run_test()

        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            self.success = TestStatus.SKIPPED
        except subprocess.CalledProcessError as e:
            self.log.exception(f"Called Process failed with stdout='{e.stdout}'; stderr='{e.stderr}';")
            self.success = TestStatus.FAILED
        except BaseException:
            # BaseException (not Exception) so that e.g. KeyboardInterrupt is
            # also recorded as a failure before shutdown() runs.
            # The `exception` log will add the exception info to the message.
            # https://docs.python.org/3/library/logging.html#logging.exception
            self.log.exception("Unexpected exception:")
            self.success = TestStatus.FAILED
        finally:
            # shutdown() always runs: it stops/cleans up nodes as appropriate
            # and maps self.success to a process exit code.
            exit_code = self.shutdown()
            sys.exit(exit_code)
 159  
 160      def run_test_methods(self):
 161          for method_name in self.options.test_methods:
 162              self.log.info(f"Attempting to execute method: {method_name}")
 163              method = getattr(self, method_name)
 164              method()
 165              self.log.info(f"Method '{method_name}' executed successfully.")
 166  
 167      def parse_args(self, test_file):
 168          previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
 169          parser = argparse.ArgumentParser(usage="%(prog)s [options]")
 170          parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
 171                              help="Leave bitcoinds and test.* datadir on exit or error")
 172          parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(test_file) + "/../cache"),
 173                              help="Directory for caching pregenerated datadirs (default: %(default)s)")
 174          parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs (must not exist)")
 175          parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
 176                              help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
 177          parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
 178                              help="Print out all RPC calls as they are made")
 179          parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
 180                              help="The seed to use for assigning port numbers (default: current process id)")
 181          parser.add_argument("--previous-releases", dest="prev_releases", action="store_true",
 182                              default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)),
 183                              help="Force test of previous releases (default: %(default)s). Previous releases binaries can be downloaded via `test/get_previous_releases.py`.")
 184          parser.add_argument("--coveragedir", dest="coveragedir",
 185                              help="Write tested RPC commands into this directory")
 186          parser.add_argument("--configfile", dest="configfile",
 187                              default=os.path.abspath(os.path.dirname(test_file) + "/../config.ini"),
 188                              help="Location of the test framework config file (default: %(default)s)")
 189          parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
 190                              help="Attach a python debugger if test fails")
 191          parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
 192                              help="use bitcoin-cli instead of RPC for all commands")
 193          parser.add_argument("--perf", dest="perf", default=False, action="store_true",
 194                              help="profile running nodes with perf for the duration of the test")
 195          parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
 196                              help="Run binaries under the valgrind memory error detector: Expect at least a ~10x slowdown. Does not apply to previous release binaries.")
 197          parser.add_argument("--randomseed", type=int,
 198                              help="set a random seed for deterministically reproducing a previous test run")
 199          parser.add_argument("--timeout-factor", dest="timeout_factor", type=float, help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts")
 200          parser.add_argument("--v2transport", dest="v2transport", default=False, action="store_true",
 201                              help="use BIP324 v2 connections between all nodes by default")
 202          parser.add_argument("--v1transport", dest="v1transport", default=False, action="store_true",
 203                              help="Explicitly use v1 transport (can be used to overwrite global --v2transport option)")
 204          parser.add_argument("--test_methods", dest="test_methods", nargs='*',
 205                              help="Run specified test methods sequentially instead of the full test. Use only for methods that do not depend on any context set up in run_test or other methods.")
 206  
 207          self.add_options(parser)
 208          # Running TestShell in a Jupyter notebook causes an additional -f argument
 209          # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument
 210          # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168
 211          parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
 212          self.options = parser.parse_args()
 213          if self.options.timeout_factor == 0:
 214              self.options.timeout_factor = 999
 215          self.options.timeout_factor = self.options.timeout_factor or (4 if self.options.valgrind else 1)
 216          self.options.previous_releases_path = previous_releases_path
 217  
 218          self.config = configparser.ConfigParser()
 219          self.config.read_file(open(self.options.configfile))
 220          self.binary_paths = get_binary_paths(self.config)
 221          if self.options.v1transport:
 222              self.options.v2transport=False
 223  
 224          PortSeed.n = self.options.port_seed
 225  
 226      def get_binaries(self, bin_dir=None):
 227          return Binaries(self.binary_paths, bin_dir, use_valgrind=self.options.valgrind)
 228  
    def setup(self):
        """Call this method to start up the test framework object with options set."""

        check_json_precision()
        export_env_build_path(self.config)

        self.options.cachedir = os.path.abspath(self.options.cachedir)

        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            # exist_ok=False: a user-supplied --tmpdir must not already exist.
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
        self._start_logging()

        # Seed the PRNG. Note that test runs are reproducible if and only if
        # a single thread accesses the PRNG. For more information, see
        # https://docs.python.org/3/library/random.html#notes-on-reproducibility.
        # The network thread shouldn't access random. If we need to change the
        # network thread to access randomness, it should instantiate its own
        # random.Random object.
        seed = self.options.randomseed

        if seed is None:
            seed = random.randrange(sys.maxsize)
        else:
            self.log.info("User supplied random seed {}".format(seed))

        random.seed(seed)
        self.log.info("PRNG seed is: {}".format(seed))

        self.log.debug('Setting up network thread')
        self.network_thread = NetworkThread()
        self.network_thread.start()
        # Wait until the P2P event loop is actually running before any test
        # code tries to open P2P connections through it.
        self.wait_until(lambda: self.network_thread.network_event_loop is not None and self.network_thread.network_event_loop.is_running())

        if self.options.usecli:
            if not self.supports_cli:
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.skip_if_no_cli()
        self.skip_test_if_missing_module()
        self.setup_chain()
        self.setup_network()

        # Assume success; main() overwrites this on SkipTest or any failure.
        self.success = TestStatus.PASSED
 275  
    def shutdown(self):
        """Call this method to shut down the test framework object."""

        if self.success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        self.log.debug('Closing down network thread')
        self.network_thread.close(timeout=self.options.timeout_factor * 10)
        if self.success == TestStatus.FAILED:
            # On failure, leave the bitcoinds running so their state can be
            # inspected; they are cleaned up later (outside this process).
            self.log.info("Not stopping nodes as test failed. The dangling processes will be cleaned up later.")
        else:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()

        # Keep the datadirs on --nocleanup, on failure, or when perf profiles
        # were written into them (--perf).
        should_clean_up = (
            not self.options.nocleanup and
            self.success != TestStatus.FAILED and
            not self.options.perf
        )
        if should_clean_up:
            self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
            cleanup_tree_on_exit = True
        elif self.options.perf:
            self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        else:
            self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
            cleanup_tree_on_exit = False

        # Map the recorded TestStatus onto the process exit code.
        if self.success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif self.success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("")
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            self.log.error("")
            self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.")
            self.log.error(self.config['environment']['CLIENT_BUGREPORT'])
            self.log.error("")
            exit_code = TEST_EXIT_FAILED
        # Logging.shutdown will not remove stream- and filehandlers, so we must
        # do it explicitly. Handlers are removed so the next test run can apply
        # different log handler settings.
        # See: https://docs.python.org/3/library/logging.html#logging.shutdown
        for h in list(self.log.handlers):
            h.flush()
            h.close()
            self.log.removeHandler(h)
        rpc_logger = logging.getLogger("BitcoinRPC")
        for h in list(rpc_logger.handlers):
            h.flush()
            rpc_logger.removeHandler(h)
        if cleanup_tree_on_exit:
            self.cleanup_folder(self.options.tmpdir)

        self.nodes.clear()
        return exit_code
 339  
 340      # Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc.

        At minimum, self.num_nodes must be set here (asserted in main())."""
        raise NotImplementedError
 344  
 345      def add_options(self, parser):
 346          """Override this method to add command-line options to the test"""
 347          pass
 348  
 349      def skip_test_if_missing_module(self):
 350          """Override this method to skip a test if a module is not compiled"""
 351          pass
 352  
 353      def setup_chain(self):
 354          """Override this method to customize blockchain setup"""
 355          self.log.info("Initializing test directory " + self.options.tmpdir)
 356          if self.setup_clean_chain:
 357              self._initialize_chain_clean()
 358          else:
 359              self._initialize_chain()
 360  
 361      def setup_network(self):
 362          """Override this method to customize test network topology"""
 363          self.setup_nodes()
 364  
 365          # Connect the nodes as a "chain".  This allows us
 366          # to split the network between nodes 1 and 2 to get
 367          # two halves that can work on competing chains.
 368          #
 369          # Topology looks like this:
 370          # node0 <-- node1 <-- node2 <-- node3
 371          #
 372          # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
 373          # ensure block propagation, all nodes will establish outgoing connections toward node0.
 374          # See fPreferredDownload in net_processing.
 375          #
 376          # If further outbound connections are needed, they can be added at the beginning of the test with e.g.
 377          # self.connect_nodes(1, 2)
 378          for i in range(self.num_nodes - 1):
 379              self.connect_nodes(i + 1, i)
 380          self.sync_all()
 381  
    def setup_nodes(self):
        """Override this method to customize test node setup"""
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()
        if self.uses_wallet:
            self.import_deterministic_coinbase_privkeys()
        if not self.setup_clean_chain:
            # The pregenerated cached chain is expected to contain 199 blocks.
            for n in self.nodes:
                assert_equal(n.getblockchaininfo()["blocks"], 199)
            # To ensure that all nodes are out of IBD, the most recent block
            # must have a timestamp not too old (see IsInitialBlockDownload()).
            self.log.debug('Generate a block with current time')
            block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0]
            block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
            # Submit the raw block directly to every node (no P2P relay needed),
            # then verify all nodes are at height 200 and out of IBD.
            for n in self.nodes:
                n.submitblock(block)
                chain_info = n.getblockchaininfo()
                assert_equal(chain_info["blocks"], 200)
                assert_equal(chain_info["initialblockdownload"], False)
 401  
 402      def import_deterministic_coinbase_privkeys(self):
 403          for i in range(self.num_nodes):
 404              self.init_wallet(node=i)
 405  
 406      def init_wallet(self, *, node):
 407          wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[node] if node < len(self.wallet_names) else False
 408          if wallet_name is not False:
 409              n = self.nodes[node]
 410              if wallet_name is not None:
 411                  n.createwallet(wallet_name=wallet_name, load_on_startup=True)
 412              wallet_importprivkey(n.get_wallet_rpc(wallet_name), n.get_deterministic_priv_key().key, 0, label="coinbase")
 413  
    def run_test(self):
        """Tests must override this method to define test logic.

        Invoked by main() after setup() completes."""
        raise NotImplementedError
 417  
 418      # Public helper methods. These can be accessed by the subclass test scripts.
 419  
 420      def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, versions=None):
 421          """Instantiate TestNode objects.
 422  
 423          Should only be called once after the nodes have been specified in
 424          set_test_params()."""
 425          def bin_dir_from_version(version):
 426              if not version:
 427                  return None
 428              if version > 219999:
 429                  # Starting at client version 220000 the first two digits represent
 430                  # the major version, e.g. v22.0 instead of v0.22.0.
 431                  version *= 100
 432              return os.path.join(
 433                  self.options.previous_releases_path,
 434                  re.sub(
 435                      r'\.0$' if version <= 219999 else r'(\.0){1,2}$',
 436                      '', # Remove trailing dot for point releases, after 22.0 also remove double trailing dot.
 437                      'v{}.{}.{}.{}'.format(
 438                          (version % 100000000) // 1000000,
 439                          (version % 1000000) // 10000,
 440                          (version % 10000) // 100,
 441                          (version % 100) // 1,
 442                      ),
 443                  ),
 444                  'bin',
 445              )
 446  
 447          if self.bind_to_localhost_only:
 448              extra_confs = [["bind=127.0.0.1"]] * num_nodes
 449          else:
 450              extra_confs = [[]] * num_nodes
 451          if extra_args is None:
 452              extra_args = [[]] * num_nodes
 453          # Whitelist peers to speed up tx relay / mempool sync. Don't use it if testing tx relay or timing.
 454          if self.noban_tx_relay:
 455              for i in range(len(extra_args)):
 456                  extra_args[i] = extra_args[i] + ["-whitelist=noban,in,out@127.0.0.1"]
 457          if versions is None:
 458              versions = [None] * num_nodes
 459          bin_dirs = []
 460          for v in versions:
 461              bin_dir = bin_dir_from_version(v)
 462              bin_dirs.append(bin_dir)
 463  
 464          extra_init = [{}] * num_nodes if self.extra_init is None else self.extra_init # type: ignore[var-annotated]
 465          assert_equal(len(extra_init), num_nodes)
 466          assert_equal(len(extra_confs), num_nodes)
 467          assert_equal(len(extra_args), num_nodes)
 468          assert_equal(len(versions), num_nodes)
 469          assert_equal(len(bin_dirs), num_nodes)
 470          for i in range(num_nodes):
 471              args = list(extra_args[i])
 472              init = dict(
 473                  chain=self.chain,
 474                  rpchost=rpchost,
 475                  timewait=self.rpc_timeout,
 476                  timeout_factor=self.options.timeout_factor,
 477                  binaries=self.get_binaries(bin_dirs[i]),
 478                  version=versions[i],
 479                  coverage_dir=self.options.coveragedir,
 480                  cwd=self.options.tmpdir,
 481                  extra_conf=extra_confs[i],
 482                  extra_args=args,
 483                  use_cli=self.options.usecli,
 484                  start_perf=self.options.perf,
 485                  v2transport=self.options.v2transport,
 486                  uses_wallet=self.uses_wallet,
 487              )
 488              init.update(extra_init[i])
 489              test_node_i = TestNode(
 490                  i,
 491                  get_datadir_path(self.options.tmpdir, i),
 492                  **init)
 493              self.nodes.append(test_node_i)
 494              if not test_node_i.version_is_at_least(170000):
 495                  # adjust conf for pre 17
 496                  test_node_i.replace_in_config([('[regtest]', '')])
 497  
 498      def start_node(self, i, *args, **kwargs):
 499          """Start a bitcoind"""
 500  
 501          node = self.nodes[i]
 502  
 503          node.start(*args, **kwargs)
 504          node.wait_for_rpc_connection()
 505  
 506          if self.options.coveragedir is not None:
 507              coverage.write_all_rpc_commands(self.options.coveragedir, node._rpc)
 508  
 509      def start_nodes(self, extra_args=None, *args, **kwargs):
 510          """Start multiple bitcoinds"""
 511  
 512          if extra_args is None:
 513              extra_args = [None] * self.num_nodes
 514          assert_equal(len(extra_args), self.num_nodes)
 515          for i, node in enumerate(self.nodes):
 516              node.start(extra_args[i], *args, **kwargs)
 517          for node in self.nodes:
 518              node.wait_for_rpc_connection()
 519  
 520          if self.options.coveragedir is not None:
 521              for node in self.nodes:
 522                  coverage.write_all_rpc_commands(self.options.coveragedir, node._rpc)
 523  
 524      def stop_node(self, i, expected_stderr='', wait=0):
 525          """Stop a bitcoind test node"""
 526          self.nodes[i].stop_node(expected_stderr, wait=wait)
 527  
 528      def stop_nodes(self, wait=0):
 529          """Stop multiple bitcoind test nodes"""
 530          for node in self.nodes:
 531              # Issue RPC to stop nodes
 532              node.stop_node(wait=wait, wait_until_stopped=False)
 533  
 534          for node in self.nodes:
 535              # Wait for nodes to stop
 536              node.wait_until_stopped()
 537  
 538      def restart_node(self, i, extra_args=None, clear_addrman=False, *, expected_stderr=''):
 539          """Stop and start a test node"""
 540          self.stop_node(i, expected_stderr=expected_stderr)
 541          if clear_addrman:
 542              peers_dat = self.nodes[i].chain_path / "peers.dat"
 543              os.remove(peers_dat)
 544              with self.nodes[i].assert_debug_log(expected_msgs=[f'Creating peers.dat because the file was not found ("{peers_dat}")']):
 545                  self.start_node(i, extra_args)
 546          else:
 547              self.start_node(i, extra_args)
 548  
 549      def wait_for_node_exit(self, i, timeout):
 550          self.nodes[i].process.wait(timeout)
 551  
    def connect_nodes(self, a, b, *, peer_advertises_v2=None):
        """Open an outbound connection from node a to node b and wait until the
        version handshake has completed in both directions.

        peer_advertises_v2 overrides whether BIP324 v2 transport is advertised
        for this connection; default is node a's own transport setting."""
        from_connection = self.nodes[a]
        to_connection = self.nodes[b]

        # Use subversion as peer id. Test nodes have their node number appended to the user agent string
        from_connection_subver = from_connection.getnetworkinfo()['subversion']
        to_connection_subver = to_connection.getnetworkinfo()['subversion']

        def find_conn(node, peer_subversion, inbound):
            # First peerinfo entry matching subver + direction, or None.
            return next(filter(lambda peer: peer['subver'] == peer_subversion and peer['inbound'] == inbound, node.getpeerinfo()), None)

        # Require that the nodes are not already connected in this direction.
        self.wait_until(lambda: not find_conn(from_connection, to_connection_subver, inbound=False))
        self.wait_until(lambda: not find_conn(to_connection, from_connection_subver, inbound=True))

        ip_port = "127.0.0.1:" + str(p2p_port(b))

        if peer_advertises_v2 is None:
            peer_advertises_v2 = from_connection.use_v2transport

        if peer_advertises_v2 != from_connection.use_v2transport:
            from_connection.addnode(node=ip_port, command="onetry", v2transport=peer_advertises_v2)
        else:
            # skip the optional third argument if it matches the default, for
            # compatibility with older clients
            from_connection.addnode(ip_port, "onetry")

        # Wait for the connection to appear on both sides.
        self.wait_until(lambda: find_conn(from_connection, to_connection_subver, inbound=False) is not None)
        self.wait_until(lambda: find_conn(to_connection, from_connection_subver, inbound=True) is not None)

        def check_bytesrecv(peer, msg_type, min_bytes_recv):
            assert peer is not None, "Error: peer disconnected"
            return peer['bytesrecv_per_msg'].pop(msg_type, 0) >= min_bytes_recv

        # Poll until version handshake (fSuccessfullyConnected) is complete to
        # avoid race conditions, because some message types are blocked from
        # being sent or received before fSuccessfullyConnected.
        #
        # As the flag fSuccessfullyConnected is not exposed, check it by
        # waiting for a pong, which can only happen after the flag was set.
        self.wait_until(lambda: check_bytesrecv(find_conn(from_connection, to_connection_subver, inbound=False), 'pong', 29))
        self.wait_until(lambda: check_bytesrecv(find_conn(to_connection, from_connection_subver, inbound=True), 'pong', 29))
 593  
    def disconnect_nodes(self, a, b):
        """Disconnect nodes a and b and wait until the connection is gone from
        both peer lists."""
        def disconnect_nodes_helper(node_a, node_b):
            def get_peer_ids(from_connection, node_num):
                # Peer ids whose user agent carries the "testnode<N>" marker
                # of the target node.
                result = []
                for peer in from_connection.getpeerinfo():
                    if "testnode{}".format(node_num) in peer['subver']:
                        result.append(peer['id'])
                return result

            peer_ids = get_peer_ids(node_a, node_b.index)
            if not peer_ids:
                self.log.warning("disconnect_nodes: {} and {} were not connected".format(
                    node_a.index,
                    node_b.index,
                ))
                return
            for peer_id in peer_ids:
                try:
                    node_a.disconnectnode(nodeid=peer_id)
                except JSONRPCException as e:
                    # If this node is disconnected between calculating the peer id
                    # and issuing the disconnect, don't worry about it.
                    # This avoids a race condition if we're mass-disconnecting peers.
                    if e.error['code'] != -29:  # RPC_CLIENT_NODE_NOT_CONNECTED
                        raise

            # wait to disconnect
            self.wait_until(lambda: not get_peer_ids(node_a, node_b.index), timeout=5)
            self.wait_until(lambda: not get_peer_ids(node_b, node_a.index), timeout=5)

        disconnect_nodes_helper(self.nodes[a], self.nodes[b])
 625  
 626      def split_network(self):
 627          """
 628          Split the network of four nodes into nodes 0/1 and 2/3.
 629          """
 630          self.disconnect_nodes(1, 2)
 631          self.sync_all(self.nodes[:2])
 632          self.sync_all(self.nodes[2:])
 633  
 634      def join_network(self):
 635          """
 636          Join the (previously split) network halves together.
 637          """
 638          self.connect_nodes(1, 2)
 639          self.sync_all()
 640  
    def no_op(self):
        """Intentionally do nothing (a placeholder callback, e.g. usable where a
        sync_fun-style callable is required but no syncing is wanted)."""
        pass
 643  
 644      def generate(self, generator, *args, sync_fun=None, **kwargs):
 645          blocks = generator.generate(*args, called_by_framework=True, **kwargs)
 646          sync_fun() if sync_fun else self.sync_all()
 647          return blocks
 648  
 649      def generateblock(self, generator, *args, sync_fun=None, **kwargs):
 650          blocks = generator.generateblock(*args, called_by_framework=True, **kwargs)
 651          sync_fun() if sync_fun else self.sync_all()
 652          return blocks
 653  
 654      def generatetoaddress(self, generator, *args, sync_fun=None, **kwargs):
 655          blocks = generator.generatetoaddress(*args, called_by_framework=True, **kwargs)
 656          sync_fun() if sync_fun else self.sync_all()
 657          return blocks
 658  
 659      def generatetodescriptor(self, generator, *args, sync_fun=None, **kwargs):
 660          blocks = generator.generatetodescriptor(*args, called_by_framework=True, **kwargs)
 661          sync_fun() if sync_fun else self.sync_all()
 662          return blocks
 663  
 664      def create_outpoints(self, node, *, outputs):
 665          """Send funds to a given list of `{address: amount}` targets using the bitcoind
 666          wallet and return the corresponding outpoints as a list of dictionaries
 667          `[{"txid": txid, "vout": vout1}, {"txid": txid, "vout": vout2}, ...]`.
 668          The result can be used to specify inputs for RPCs like `createrawtransaction`,
 669          `createpsbt`, `lockunspent` etc."""
 670          for output in outputs:
 671              assert_equal(len(output.keys()), 1)
 672          send_res = node.send(outputs)
 673          assert send_res["complete"]
 674          utxos = []
 675          for output in outputs:
 676              address = list(output.keys())[0]
 677              vout = find_vout_for_address(node, send_res["txid"], address)
 678              utxos.append({"txid": send_res["txid"], "vout": vout})
 679          return utxos
 680  
 681      def sync_blocks(self, nodes=None, wait=1, timeout=60):
 682          """
 683          Wait until everybody has the same tip.
 684          sync_blocks needs to be called with an rpc_connections set that has least
 685          one node already synced to the latest, stable tip, otherwise there's a
 686          chance it might return before all nodes are stably synced.
 687          """
 688          rpc_connections = nodes or self.nodes
 689          timeout = int(timeout * self.options.timeout_factor)
 690          stop_time = time.time() + timeout
 691          while time.time() <= stop_time:
 692              best_hash = [x.getbestblockhash() for x in rpc_connections]
 693              if best_hash.count(best_hash[0]) == len(rpc_connections):
 694                  return
 695              # Check that each peer has at least one connection
 696              assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
 697              time.sleep(wait)
 698          raise AssertionError("Block sync timed out after {}s:{}".format(
 699              timeout,
 700              "".join("\n  {!r}".format(b) for b in best_hash),
 701          ))
 702  
 703      def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
 704          """
 705          Wait until everybody has the same transactions in their memory
 706          pools
 707          """
 708          rpc_connections = nodes or self.nodes
 709          timeout = int(timeout * self.options.timeout_factor)
 710          stop_time = time.time() + timeout
 711          while time.time() <= stop_time:
 712              pool = [set(r.getrawmempool()) for r in rpc_connections]
 713              if pool.count(pool[0]) == len(rpc_connections):
 714                  if flush_scheduler:
 715                      for r in rpc_connections:
 716                          r.syncwithvalidationinterfacequeue()
 717                  return
 718              # Check that each peer has at least one connection
 719              assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
 720              time.sleep(wait)
 721          raise AssertionError("Mempool sync timed out after {}s:{}".format(
 722              timeout,
 723              "".join("\n  {!r}".format(m) for m in pool),
 724          ))
 725  
    def sync_all(self, nodes=None):
        """Sync both blocks and mempools across the given nodes (all by default)."""
        self.sync_blocks(nodes)
        self.sync_mempools(nodes)
 729  
 730      def wait_until(self, test_function, timeout=60, check_interval=0.05):
 731          return wait_until_helper_internal(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor, check_interval=check_interval)
 732  
 733      # Private helper methods. These should not be accessed by the subclass test scripts.
 734  
    def _start_logging(self):
        """Set up the 'TestFramework' logger: a DEBUG file handler writing to
        <tmpdir>/test_framework.log plus a console handler on stdout whose level
        is set by --loglevel; with --tracerpc, also mirror raw RPC traffic."""
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)

        # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
        class MicrosecondFormatter(logging.Formatter):
            def formatTime(self, record, _=None):
                # Render the record's creation time in UTC with microseconds.
                dt = datetime.fromtimestamp(record.created, timezone.utc)
                return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')

        formatter = MicrosecondFormatter(
            fmt='%(asctime)sZ %(name)s (%(levelname)s): %(message)s',
        )
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)

        if self.options.trace_rpc:
            # Echo every RPC request/response to stdout for debugging.
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
 769  
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.

        Create (or reuse) a cached datadir holding a 199-block-long chain, then
        copy it into a fresh datadir for each of the num_nodes test nodes,
        rewriting ports in each copy's bitcoin.conf."""

        CACHE_NODE_ID = 0  # Use node 0 to create the cache for all other nodes
        cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
        assert self.num_nodes <= MAX_NODES

        if not os.path.isdir(cache_node_dir):
            self.log.debug("Creating cache directory {}".format(cache_node_dir))

            initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain, self.disable_autoconnect)
            self.nodes.append(
                TestNode(
                    CACHE_NODE_ID,
                    cache_node_dir,
                    chain=self.chain,
                    extra_conf=["bind=127.0.0.1"],
                    extra_args=[],
                    rpchost=None,
                    timewait=self.rpc_timeout,
                    timeout_factor=self.options.timeout_factor,
                    binaries=self.get_binaries(),
                    coverage_dir=None,
                    cwd=self.options.tmpdir,
                    uses_wallet=self.uses_wallet,
                ))
            self.start_node(CACHE_NODE_ID)
            cache_node = self.nodes[CACHE_NODE_ID]

            # Wait for RPC connections to be ready
            cache_node.wait_for_rpc_connection()

            # Set a time in the past, so that blocks don't end up in the future
            cache_node.setmocktime(cache_node.getblockheader(cache_node.getbestblockhash())['time'])

            # Create a 199-block-long chain; each of the 3 first nodes
            # gets 25 mature blocks and 25 immature.
            # The 4th address gets 25 mature and only 24 immature blocks so that the very last
            # block in the cache does not age too much (have an old tip age).
            # This is needed so that we are out of IBD when the test starts,
            # see the tip age check in IsInitialBlockDownload().
            gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [create_deterministic_address_bcrt1_p2tr_op_true()[0]]
            assert_equal(len(gen_addresses), 4)
            # 8 rounds of 25 blocks (last round mines 24): 7*25 + 24 = 199.
            for i in range(8):
                self.generatetoaddress(
                    cache_node,
                    nblocks=25 if i != 7 else 24,
                    address=gen_addresses[i % len(gen_addresses)],
                )

            assert_equal(cache_node.getblockchaininfo()["blocks"], 199)

            # Shut it down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []

            def cache_path(*paths):
                # Path inside the cache node's chain-specific datadir.
                return os.path.join(cache_node_dir, self.chain, *paths)

            os.rmdir(cache_path('wallets'))  # Remove empty wallets dir
            for entry in os.listdir(cache_path()):
                if entry not in ['chainstate', 'blocks', 'indexes']:  # Only indexes, chainstate and blocks folders
                    os.remove(cache_path(entry))

        for i in range(self.num_nodes):
            self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(cache_node_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect)  # Overwrite port/rpcport in bitcoin.conf
 842  
 843      def _initialize_chain_clean(self):
 844          """Initialize empty blockchain for use by the test.
 845  
 846          Create an empty blockchain and num_nodes wallets.
 847          Useful if a test case wants complete control over initialization."""
 848          for i in range(self.num_nodes):
 849              initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect)
 850  
 851      def skip_if_no_py3_zmq(self):
 852          """Attempt to import the zmq package and skip the test if the import fails."""
 853          try:
 854              import zmq  # noqa
 855          except ImportError:
 856              raise SkipTest("python3-zmq module not available.")
 857  
 858      def skip_if_no_py_sqlite3(self):
 859          """Attempt to import the sqlite3 package and skip the test if the import fails."""
 860          try:
 861              import sqlite3  # noqa
 862          except ImportError:
 863              raise SkipTest("sqlite3 module not available.")
 864  
 865      def skip_if_no_py_capnp(self):
 866          """Attempt to import the capnp package and skip the test if the import fails."""
 867          try:
 868              import capnp  # type: ignore[import] # noqa: F401
 869          except ImportError:
 870              raise SkipTest("capnp module not available.")
 871  
 872      def skip_if_no_python_bcc(self):
 873          """Attempt to import the bcc package and skip the tests if the import fails."""
 874          try:
 875              import bcc  # type: ignore[import] # noqa: F401
 876          except ImportError:
 877              raise SkipTest("bcc python module not available")
 878  
 879      def skip_if_no_bitcoind_tracepoints(self):
 880          """Skip the running test if bitcoind has not been compiled with USDT tracepoint support."""
 881          if not self.is_usdt_compiled():
 882              raise SkipTest("bitcoind has not been built with USDT tracepoints enabled.")
 883  
 884      def skip_if_no_bpf_permissions(self):
 885          """Skip the running test if we don't have permissions to do BPF syscalls and load BPF maps."""
 886          # check for 'root' permissions
 887          if os.geteuid() != 0:
 888              raise SkipTest("no permissions to use BPF (please review the tests carefully before running them with higher privileges)")
 889  
 890      def skip_if_platform_not_linux(self):
 891          """Skip the running test if we are not on a Linux platform"""
 892          if platform.system() != "Linux":
 893              raise SkipTest("not on a Linux system")
 894  
 895      def skip_if_platform_not_posix(self):
 896          """Skip the running test if we are not on a POSIX platform"""
 897          if os.name != 'posix':
 898              raise SkipTest("not on a POSIX system")
 899  
 900      def skip_if_no_bitcoind_zmq(self):
 901          """Skip the running test if bitcoind has not been compiled with zmq support."""
 902          if not self.is_zmq_compiled():
 903              raise SkipTest("bitcoind has not been built with zmq enabled.")
 904  
 905      def skip_if_no_wallet(self):
 906          """Skip the running test if wallet has not been compiled."""
 907          self.uses_wallet = True
 908          if not self.is_wallet_compiled():
 909              raise SkipTest("wallet has not been compiled.")
 910  
 911      def skip_if_no_wallet_tool(self):
 912          """Skip the running test if bitcoin-wallet has not been compiled."""
 913          if not self.is_wallet_tool_compiled():
 914              raise SkipTest("bitcoin-wallet has not been compiled")
 915  
 916      def skip_if_no_bitcoin_tx(self):
 917          """Skip the running test if bitcoin-tx has not been compiled."""
 918          if not self.is_bitcoin_tx_compiled():
 919              raise SkipTest("bitcoin-tx has not been compiled")
 920  
 921      def skip_if_no_bitcoin_util(self):
 922          """Skip the running test if bitcoin-util has not been compiled."""
 923          if not self.is_bitcoin_util_compiled():
 924              raise SkipTest("bitcoin-util has not been compiled")
 925  
 926      def skip_if_no_bitcoin_chainstate(self):
 927          """Skip the running test if bitcoin-chainstate has not been compiled."""
 928          if not self.is_bitcoin_chainstate_compiled():
 929              raise SkipTest("bitcoin-chainstate has not been compiled")
 930  
 931      def skip_if_no_bitcoin_bench(self):
 932          """Skip the running test if bench_bitcoin has not been compiled."""
 933          if not self.is_bench_compiled():
 934              raise SkipTest("bench_bitcoin has not been compiled")
 935  
 936      def skip_if_no_cli(self):
 937          """Skip the running test if bitcoin-cli has not been compiled."""
 938          if not self.is_cli_compiled():
 939              raise SkipTest("bitcoin-cli has not been compiled.")
 940  
 941      def skip_if_no_ipc(self):
 942          """Skip the running test if ipc is not compiled."""
 943          if not self.is_ipc_compiled():
 944              raise SkipTest("ipc has not been compiled.")
 945  
 946      def skip_if_no_previous_releases(self):
 947          """Skip the running test if previous releases are not available."""
 948          if not self.has_previous_releases():
 949              raise SkipTest("previous releases not available or disabled")
 950  
 951      def has_previous_releases(self):
 952          """Checks whether previous releases are present and enabled."""
 953          if not os.path.isdir(self.options.previous_releases_path):
 954              if self.options.prev_releases:
 955                  raise AssertionError(f"Force test of previous releases but releases missing: {self.options.previous_releases_path}\n"
 956                                       "Previous releases binaries can be downloaded via `test/get_previous_releases.py`.")
 957          return self.options.prev_releases
 958  
 959      def skip_if_no_external_signer(self):
 960          """Skip the running test if external signer support has not been compiled."""
 961          if not self.is_external_signer_compiled():
 962              raise SkipTest("external signer support has not been compiled.")
 963  
 964      def skip_if_running_under_valgrind(self):
 965          """Skip the running test if Valgrind is being used."""
 966          if self.options.valgrind:
 967              raise SkipTest("This test is not compatible with Valgrind.")
 968  
 969      def is_bench_compiled(self):
 970          """Checks whether bench_bitcoin was compiled."""
 971          return self.config["components"].getboolean("BUILD_BENCH")
 972  
 973      def is_cli_compiled(self):
 974          """Checks whether bitcoin-cli was compiled."""
 975          return self.config["components"].getboolean("ENABLE_CLI")
 976  
 977      def is_external_signer_compiled(self):
 978          """Checks whether external signer support was compiled."""
 979          return self.config["components"].getboolean("ENABLE_EXTERNAL_SIGNER")
 980  
 981      def is_wallet_compiled(self):
 982          """Checks whether the wallet module was compiled."""
 983          return self.config["components"].getboolean("ENABLE_WALLET")
 984  
 985      def is_wallet_tool_compiled(self):
 986          """Checks whether bitcoin-wallet was compiled."""
 987          return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
 988  
 989      def is_bitcoin_tx_compiled(self):
 990          """Checks whether bitcoin-tx was compiled."""
 991          return self.config["components"].getboolean("BUILD_BITCOIN_TX")
 992  
 993      def is_bitcoin_util_compiled(self):
 994          """Checks whether bitcoin-util was compiled."""
 995          return self.config["components"].getboolean("ENABLE_BITCOIN_UTIL")
 996  
 997      def is_bitcoin_chainstate_compiled(self):
 998          """Checks whether bitcoin-chainstate was compiled."""
 999          return self.config["components"].getboolean("ENABLE_BITCOIN_CHAINSTATE")
1000  
1001      def is_zmq_compiled(self):
1002          """Checks whether the zmq module was compiled."""
1003          return self.config["components"].getboolean("ENABLE_ZMQ")
1004  
1005      def is_embedded_asmap_compiled(self):
1006          """Checks whether ASMap data was embedded during compilation."""
1007          return self.config["components"].getboolean("ENABLE_EMBEDDED_ASMAP")
1008  
1009      def is_usdt_compiled(self):
1010          """Checks whether the USDT tracepoints were compiled."""
1011          return self.config["components"].getboolean("ENABLE_USDT_TRACEPOINTS")
1012  
1013      def is_ipc_compiled(self):
1014          """Checks whether ipc was compiled."""
1015          return self.config["components"].getboolean("ENABLE_IPC")
1016  
1017      def has_blockfile(self, node, filenum: str):
1018          return (node.blocks_path/ f"blk{filenum}.dat").is_file()
1019  
1020      def inspect_sqlite_db(self, path, fn, *args, **kwargs):
1021          try:
1022              import sqlite3 # type: ignore[import]
1023              conn = sqlite3.connect(path)
1024              with conn:
1025                  result = fn(conn, *args, **kwargs)
1026              conn.close()
1027              return result
1028          except ImportError:
1029              self.log.warning("sqlite3 module not available, skipping tests that inspect the database")
1030  
1031      def cleanup_folder(self, _path):
1032          path = Path(_path)
1033          if not path.is_relative_to(self.options.tmpdir):
1034              raise AssertionError(f"Trying to delete #{path} outside of #{self.options.tmpdir}")
1035          shutil.rmtree(path)