// src/net_processing.cpp
   1  // Copyright (c) 2009-2010 Satoshi Nakamoto
   2  // Copyright (c) 2009-present The Bitcoin Core developers
   3  // Distributed under the MIT software license, see the accompanying
   4  // file COPYING or http://www.opensource.org/licenses/mit-license.php.
   5  
   6  #include <net_processing.h>
   7  
   8  #include <addrman.h>
   9  #include <arith_uint256.h>
  10  #include <banman.h>
  11  #include <blockencodings.h>
  12  #include <blockfilter.h>
  13  #include <chain.h>
  14  #include <chainparams.h>
  15  #include <common/bloom.h>
  16  #include <consensus/amount.h>
  17  #include <consensus/params.h>
  18  #include <consensus/validation.h>
  19  #include <core_memusage.h>
  20  #include <crypto/siphash.h>
  21  #include <deploymentstatus.h>
  22  #include <flatfile.h>
  23  #include <headerssync.h>
  24  #include <index/blockfilterindex.h>
  25  #include <kernel/chain.h>
  26  #include <logging.h>
  27  #include <merkleblock.h>
  28  #include <net.h>
  29  #include <net_permissions.h>
  30  #include <netaddress.h>
  31  #include <netbase.h>
  32  #include <netmessagemaker.h>
  33  #include <node/blockstorage.h>
  34  #include <node/connection_types.h>
  35  #include <node/protocol_version.h>
  36  #include <node/timeoffsets.h>
  37  #include <node/txdownloadman.h>
  38  #include <node/txreconciliation.h>
  39  #include <node/warnings.h>
  40  #include <policy/feerate.h>
  41  #include <policy/fees.h>
  42  #include <policy/packages.h>
  43  #include <policy/policy.h>
  44  #include <primitives/block.h>
  45  #include <primitives/transaction.h>
  46  #include <protocol.h>
  47  #include <random.h>
  48  #include <scheduler.h>
  49  #include <script/script.h>
  50  #include <serialize.h>
  51  #include <span.h>
  52  #include <streams.h>
  53  #include <sync.h>
  54  #include <tinyformat.h>
  55  #include <txmempool.h>
  56  #include <txorphanage.h>
  57  #include <uint256.h>
  58  #include <util/check.h>
  59  #include <util/strencodings.h>
  60  #include <util/time.h>
  61  #include <util/trace.h>
  62  #include <validation.h>
  63  
  64  #include <algorithm>
  65  #include <array>
  66  #include <atomic>
  67  #include <compare>
  68  #include <cstddef>
  69  #include <deque>
  70  #include <exception>
  71  #include <functional>
  72  #include <future>
  73  #include <initializer_list>
  74  #include <iterator>
  75  #include <limits>
  76  #include <list>
  77  #include <map>
  78  #include <memory>
  79  #include <optional>
  80  #include <queue>
  81  #include <ranges>
  82  #include <ratio>
  83  #include <set>
  84  #include <span>
  85  #include <typeinfo>
  86  #include <utility>
  87  
  88  using namespace util::hex_literals;
  89  
  90  TRACEPOINT_SEMAPHORE(net, inbound_message);
  91  TRACEPOINT_SEMAPHORE(net, misbehaving_connection);
  92  
  93  /** Headers download timeout.
  94   *  Timeout = base + per_header * (expected number of headers) */
  95  static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
  96  static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
  97  /** How long to wait for a peer to respond to a getheaders request */
  98  static constexpr auto HEADERS_RESPONSE_TIME{2min};
  99  /** Protect at least this many outbound peers from disconnection due to slow/
 100   * behind headers chain.
 101   */
 102  static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
 103  /** Timeout for (unprotected) outbound peers to sync to our chainwork */
 104  static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
 105  /** How frequently to check for stale tips */
 106  static constexpr auto STALE_CHECK_INTERVAL{10min};
 107  /** How frequently to check for extra outbound peers and disconnect */
 108  static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
 109  /** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict */
 110  static constexpr auto MINIMUM_CONNECT_TIME{30s};
 111  /** SHA256("main address relay")[0:8] */
 112  static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
 113  /// Age after which a stale block will no longer be served if requested as
 114  /// protection against fingerprinting. Set to one month, denominated in seconds.
 115  static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
 116  /// Age after which a block is considered historical for purposes of rate
 117  /// limiting block relay. Set to one week, denominated in seconds.
 118  static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
 119  /** Time between pings automatically sent out for latency probing and keepalive */
 120  static constexpr auto PING_INTERVAL{2min};
 121  /** The maximum number of entries in a locator */
 122  static const unsigned int MAX_LOCATOR_SZ = 101;
 123  /** The maximum number of entries in an 'inv' protocol message */
 124  static const unsigned int MAX_INV_SZ = 50000;
 125  /** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */
 126  static const unsigned int MAX_GETDATA_SZ = 1000;
 127  /** Number of blocks that can be requested at any given time from a single peer. */
 128  static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
 129  /** Default time during which a peer must stall block download progress before being disconnected.
 130   * the actual timeout is increased temporarily if peers are disconnected for hitting the timeout */
 131  static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
 132  /** Maximum timeout for stalling block download. */
 133  static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
 134  /** Maximum depth of blocks we're willing to serve as compact blocks to peers
 135   *  when requested. For older blocks, a regular BLOCK response will be sent. */
 136  static const int MAX_CMPCTBLOCK_DEPTH = 5;
 137  /** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */
 138  static const int MAX_BLOCKTXN_DEPTH = 10;
 139  static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high");
 140  /** Size of the "block download window": how far ahead of our current height do we fetch?
 141   *  Larger windows tolerate larger download speed differences between peer, but increase the potential
 142   *  degree of disordering of blocks on disk (which make reindexing and pruning harder). We'll probably
 143   *  want to make this a per-peer adaptive value at some point. */
 144  static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
 145  /** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */
 146  static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
 147  /** Additional block download timeout per parallel downloading peer (i.e. 5 min) */
 148  static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
 149  /** Maximum number of headers to announce when relaying blocks with headers message.*/
 150  static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
 151  /** Minimum blocks required to signal NODE_NETWORK_LIMITED */
 152  static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
 153  /** Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers */
 154  static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144;
 155  /** Average delay between local address broadcasts */
 156  static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
 157  /** Average delay between peer address broadcasts */
 158  static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
 159  /** Delay between rotating the peers we relay a particular address to */
 160  static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
 161  /** Average delay between trickled inventory transmissions for inbound peers.
 162   *  Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */
 163  static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
 164  /** Average delay between trickled inventory transmissions for outbound peers.
 165   *  Use a smaller delay as there is less privacy concern for them.
 166   *  Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */
 167  static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s};
 168  /** Maximum rate of inventory items to send per second.
 169   *  Limits the impact of low-fee transaction floods. */
 170  static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
 171  /** Target number of tx inventory items to send per transmission. */
 172  static constexpr unsigned int INVENTORY_BROADCAST_TARGET = INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
 173  /** Maximum number of inventory items to send per transmission. */
 174  static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000;
 175  static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low");
 176  static_assert(INVENTORY_BROADCAST_MAX <= node::MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high");
 177  /** Average delay between feefilter broadcasts in seconds. */
 178  static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
 179  /** Maximum feefilter broadcast delay after significant change. */
 180  static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
 181  /** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
 182  static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
 183  /** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
 184  static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
 185  /** the maximum percentage of addresses from our addrman to return in response to a getaddr message. */
 186  static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
 187  /** The maximum number of address records permitted in an ADDR message. */
 188  static constexpr size_t MAX_ADDR_TO_SEND{1000};
 189  /** The maximum rate of address records we're willing to process on average. Can be bypassed using
 190   *  the NetPermissionFlags::Addr permission. */
 191  static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
 192  /** The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND
 193   *  based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR
 194   *  is exempt from this limit). */
 195  static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND};
 196  /** The compactblocks version we support. See BIP 152. */
 197  static constexpr uint64_t CMPCTBLOCKS_VERSION{2};
 198  
 199  // Internal stuff
 200  namespace {
/** Blocks that are in flight, and that are in the queue to be downloaded. */
struct QueuedBlock {
    /** BlockIndex. We must have this since we only request blocks when we've already validated the header. */
    const CBlockIndex* pindex;
    /** Optional, used for CMPCTBLOCK downloads: partially reconstructed block state; nullptr for regular block requests. */
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
 208  
 209  /**
 210   * Data structure for an individual peer. This struct is not protected by
 211   * cs_main since it does not contain validation-critical data.
 212   *
 213   * Memory is owned by shared pointers and this object is destructed when
 214   * the refcount drops to zero.
 215   *
 216   * Mutexes inside this struct must not be held when locking m_peer_mutex.
 217   *
 218   * TODO: move most members from CNodeState to this structure.
 219   * TODO: move remaining application-layer data members from CNode to this structure.
 220   */
 221  struct Peer {
 222      /** Same id as the CNode object for this peer */
 223      const NodeId m_id{0};
 224  
 225      /** Services we offered to this peer.
 226       *
 227       *  This is supplied by CConnman during peer initialization. It's const
 228       *  because there is no protocol defined for renegotiating services
 229       *  initially offered to a peer. The set of local services we offer should
 230       *  not change after initialization.
 231       *
 232       *  An interesting example of this is NODE_NETWORK and initial block
 233       *  download: a node which starts up from scratch doesn't have any blocks
 234       *  to serve, but still advertises NODE_NETWORK because it will eventually
 235       *  fulfill this role after IBD completes. P2P code is written in such a
 236       *  way that it can gracefully handle peers who don't make good on their
 237       *  service advertisements. */
 238      const ServiceFlags m_our_services;
 239      /** Services this peer offered to us. */
 240      std::atomic<ServiceFlags> m_their_services{NODE_NONE};
 241  
 242      //! Whether this peer is an inbound connection
 243      const bool m_is_inbound;
 244  
 245      /** Protects misbehavior data members */
 246      Mutex m_misbehavior_mutex;
 247      /** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */
 248      bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
 249  
 250      /** Protects block inventory data members */
 251      Mutex m_block_inv_mutex;
 252      /** List of blocks that we'll announce via an `inv` message.
 253       * There is no final sorting before sending, as they are always sent
 254       * immediately and in the order requested. */
 255      std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
 256      /** Unfiltered list of blocks that we'd like to announce via a `headers`
 257       * message. If we can't announce via a `headers` message, we'll fall back to
 258       * announcing via `inv`. */
 259      std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
 260      /** The final block hash that we sent in an `inv` message to this peer.
 261       * When the peer requests this block, we send an `inv` message to trigger
 262       * the peer to request the next sequence of block hashes.
 263       * Most peers use headers-first syncing, which doesn't use this mechanism */
 264      uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};
 265  
 266      /** Set to true once initial VERSION message was sent (only relevant for outbound peers). */
 267      bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
 268  
 269      /** This peer's reported block height when we connected */
 270      std::atomic<int> m_starting_height{-1};
 271  
 272      /** The pong reply we're expecting, or 0 if no pong expected. */
 273      std::atomic<uint64_t> m_ping_nonce_sent{0};
 274      /** When the last ping was sent, or 0 if no ping was ever sent */
 275      std::atomic<std::chrono::microseconds> m_ping_start{0us};
 276      /** Whether a ping has been requested by the user */
 277      std::atomic<bool> m_ping_queued{false};
 278  
 279      /** Whether this peer relays txs via wtxid */
 280      std::atomic<bool> m_wtxid_relay{false};
 281      /** The feerate in the most recent BIP133 `feefilter` message sent to the peer.
 282       *  It is *not* a p2p protocol violation for the peer to send us
 283       *  transactions with a lower fee rate than this. See BIP133. */
 284      CAmount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
 285      /** Timestamp after which we will send the next BIP133 `feefilter` message
 286        * to the peer. */
 287      std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
 288  
 289      struct TxRelay {
 290          mutable RecursiveMutex m_bloom_filter_mutex;
 291          /** Whether we relay transactions to this peer. */
 292          bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
 293          /** A bloom filter for which transactions to announce to the peer. See BIP37. */
 294          std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};
 295  
 296          mutable RecursiveMutex m_tx_inventory_mutex;
 297          /** A filter of all the (w)txids that the peer has announced to
 298           *  us or we have announced to the peer. We use this to avoid announcing
 299           *  the same (w)txid to a peer that already has the transaction. */
 300          CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
 301          /** Set of transaction ids we still have to announce (txid for
 302           *  non-wtxid-relay peers, wtxid for wtxid-relay peers). We use the
 303           *  mempool to sort transactions in dependency order before relay, so
 304           *  this does not have to be sorted. */
 305          std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
 306          /** Whether the peer has requested us to send our complete mempool. Only
 307           *  permitted if the peer has NetPermissionFlags::Mempool or we advertise
 308           *  NODE_BLOOM. See BIP35. */
 309          bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
 310          /** The next time after which we will send an `inv` message containing
 311           *  transaction announcements to this peer. */
 312          std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
 313          /** The mempool sequence num at which we sent the last `inv` message to this peer.
 314           *  Can relay txs with lower sequence numbers than this (see CTxMempool::info_for_relay). */
 315          uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1};
 316  
 317          /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */
 318          std::atomic<CAmount> m_fee_filter_received{0};
 319      };
 320  
 321      /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */
 322      TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
 323      {
 324          LOCK(m_tx_relay_mutex);
 325          Assume(!m_tx_relay);
 326          m_tx_relay = std::make_unique<Peer::TxRelay>();
 327          return m_tx_relay.get();
 328      };
 329  
 330      TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
 331      {
 332          return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
 333      };
 334  
 335      /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
 336      std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
 337      /** Probabilistic filter to track recent addr messages relayed with this
 338       *  peer. Used to avoid relaying redundant addresses to this peer.
 339       *
 340       *  We initialize this filter for outbound peers (other than
 341       *  block-relay-only connections) or when an inbound peer sends us an
 342       *  address related message (ADDR, ADDRV2, GETADDR).
 343       *
 344       *  Presence of this filter must correlate with m_addr_relay_enabled.
 345       **/
 346      std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
 347      /** Whether we are participating in address relay with this connection.
 348       *
 349       *  We set this bool to true for outbound peers (other than
 350       *  block-relay-only connections), or when an inbound peer sends us an
 351       *  address related message (ADDR, ADDRV2, GETADDR).
 352       *
 353       *  We use this bool to decide whether a peer is eligible for gossiping
 354       *  addr messages. This avoids relaying to peers that are unlikely to
 355       *  forward them, effectively blackholing self announcements. Reasons
 356       *  peers might support addr relay on the link include that they connected
 357       *  to us as a block-relay-only peer or they are a light client.
 358       *
 359       *  This field must correlate with whether m_addr_known has been
 360       *  initialized.*/
 361      std::atomic_bool m_addr_relay_enabled{false};
 362      /** Whether a getaddr request to this peer is outstanding. */
 363      bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
 364      /** Guards address sending timers. */
 365      mutable Mutex m_addr_send_times_mutex;
 366      /** Time point to send the next ADDR message to this peer. */
 367      std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
 368      /** Time point to possibly re-announce our local address to this peer. */
 369      std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
 370      /** Whether the peer has signaled support for receiving ADDRv2 (BIP155)
 371       *  messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */
 372      std::atomic_bool m_wants_addrv2{false};
 373      /** Whether this peer has already sent us a getaddr message. */
 374      bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
 375      /** Number of addresses that can be processed from this peer. Start at 1 to
 376       *  permit self-announcement. */
 377      double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0};
 378      /** When m_addr_token_bucket was last updated */
 379      std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()};
 380      /** Total number of addresses that were dropped due to rate limiting. */
 381      std::atomic<uint64_t> m_addr_rate_limited{0};
 382      /** Total number of addresses that were processed (excludes rate-limited ones). */
 383      std::atomic<uint64_t> m_addr_processed{0};
 384  
 385      /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */
 386      bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
 387  
 388      /** Protects m_getdata_requests **/
 389      Mutex m_getdata_requests_mutex;
 390      /** Work queue of items requested by this peer **/
 391      std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
 392  
 393      /** Time of the last getheaders message to this peer */
 394      NodeClock::time_point m_last_getheaders_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){};
 395  
 396      /** Protects m_headers_sync **/
 397      Mutex m_headers_sync_mutex;
 398      /** Headers-sync state for this peer (eg for initial sync, or syncing large
 399       * reorgs) **/
 400      std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};
 401  
 402      /** Whether we've sent our peer a sendheaders message. **/
 403      std::atomic<bool> m_sent_sendheaders{false};
 404  
 405      /** When to potentially disconnect peer for stalling headers download */
 406      std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};
 407  
 408      /** Whether this peer wants invs or headers (when possible) for block announcements */
 409      bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
 410  
 411      /** Time offset computed during the version handshake based on the
 412       * timestamp the peer sent in the version message. */
 413      std::atomic<std::chrono::seconds> m_time_offset{0s};
 414  
 415      explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound)
 416          : m_id{id}
 417          , m_our_services{our_services}
 418          , m_is_inbound{is_inbound}
 419      {}
 420  
 421  private:
 422      mutable Mutex m_tx_relay_mutex;
 423  
 424      /** Transaction relay data. May be a nullptr. */
 425      std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
 426  };
 427  
/** Shared-ownership reference to a Peer; the Peer is destructed when the last reference is released. */
using PeerRef = std::shared_ptr<Peer>;
 429  
 430  /**
 431   * Maintain validation-specific state about nodes, protected by cs_main, instead
 432   * by CNode's own locks. This simplifies asynchronous operation, where
 433   * processing of incoming data is done after the ProcessMessage call returns,
 434   * and we're no longer holding the node's locks.
 435   */
struct CNodeState {
    //! The best known block we know this peer has announced.
    const CBlockIndex* pindexBestKnownBlock{nullptr};
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock{};
    //! The last full block we both have.
    const CBlockIndex* pindexLastCommonBlock{nullptr};
    //! The best header we have sent our peer.
    const CBlockIndex* pindexBestHeaderSent{nullptr};
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted{false};
    //! Since when we're stalling block download progress (in microseconds), or 0.
    std::chrono::microseconds m_stalling_since{0us};
    //! Blocks in flight / queued for download from this peer (see QueuedBlock).
    std::list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
    std::chrono::microseconds m_downloading_since{0us};
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload{false};
    /** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */
    bool m_requested_hb_cmpctblocks{false};
    /** Whether this peer will send us cmpctblocks if we request them. */
    bool m_provides_cmpctblocks{false};

    /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic.
      *
      * Both are only in effect for outbound, non-manual, non-protected connections.
      * Any peer protected (m_protect = true) is not chosen for eviction. A peer is
      * marked as protected if all of these are true:
      *   - its connection type is IsBlockOnlyConn() == false
      *   - it gave us a valid connecting header
      *   - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet
      *   - its chain tip has at least as much work as ours
      *
      * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip,
      * set a timeout CHAIN_SYNC_TIMEOUT in the future:
      *   - If at timeout their best known block now has more work than our tip
      *     when the timeout was set, then either reset the timeout or clear it
      *     (after comparing against our current tip's work)
      *   - If at timeout their best known block still has less work than our
      *     tip did when the timeout was set, then send a getheaders message,
      *     and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future.
      *     If their best known block is still behind when that new timeout is
      *     reached, disconnect.
      *
      * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers,
      * drop the outbound one that least recently announced us a new block.
      */
    struct ChainSyncTimeoutState {
        //! A timeout used for checking whether our peer has sufficiently synced
        std::chrono::seconds m_timeout{0s};
        //! A header with the work we require on our peer's chain
        const CBlockIndex* m_work_header{nullptr};
        //! After timeout is reached, set to true after sending getheaders
        bool m_sent_getheaders{false};
        //! Whether this peer is protected from disconnection due to a bad/slow chain
        bool m_protect{false};
    };

    //! Per-peer chain-sync timeout/protection state (see comment on ChainSyncTimeoutState).
    ChainSyncTimeoutState m_chain_sync;

    //! Time of last new block announcement
    int64_t m_last_block_announcement{0};
};
 499  
 500  class PeerManagerImpl final : public PeerManager
 501  {
 502  public:
 503      PeerManagerImpl(CConnman& connman, AddrMan& addrman,
 504                      BanMan* banman, ChainstateManager& chainman,
 505                      CTxMemPool& pool, node::Warnings& warnings, Options opts);
 506  
 507      /** Overridden from CValidationInterface. */
 508      void ActiveTipChange(const CBlockIndex& new_tip, bool) override
 509          EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
 510      void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
 511          EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
 512      void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
 513          EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
 514      void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
 515          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 516      void BlockChecked(const CBlock& block, const BlockValidationState& state) override
 517          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 518      void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override
 519          EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
 520  
 521      /** Implement NetEventsInterface */
 522      void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex);
 523      void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex);
 524      bool HasAllDesirableServiceFlags(ServiceFlags services) const override;
 525      bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
 526          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
 527      bool SendMessages(CNode* pto) override
 528          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex);
 529  
 530      /** Implement PeerManager */
 531      void StartScheduledTasks(CScheduler& scheduler) override;
 532      void CheckForStaleTipAndEvictPeers() override;
 533      std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
 534          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 535      bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 536      std::vector<TxOrphanage::OrphanTxBase> GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
 537      PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 538      void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 539      void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    //! Remember the height and time of the current best block.
    void SetBestBlock(int height, std::chrono::seconds time) override
    {
        m_best_height = height;
        m_best_block_time = time;
    };
 545      void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); };
 546      void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
 547                          const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
 548          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
 549      void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;
 550      ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override;
 551  
 552  private:
 553      /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
 554      void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);
 555  
 556      /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
 557      void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 558  
 559      /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */
 560      void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 561  
 562      /** Get a shared pointer to the Peer object.
 563       *  May return an empty shared_ptr if the Peer object can't be found. */
 564      PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 565  
 566      /** Get a shared pointer to the Peer object and remove it from m_peer_map.
 567       *  May return an empty shared_ptr if the Peer object can't be found. */
 568      PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 569  
 570      /** Mark a peer as misbehaving, which will cause it to be disconnected and its
 571       *  address discouraged. */
 572      void Misbehaving(Peer& peer, const std::string& message);
 573  
 574      /**
 575       * Potentially mark a node discouraged based on the contents of a BlockValidationState object
 576       *
 577       * @param[in] via_compact_block this bool is passed in because net_processing should
 578       * punish peers differently depending on whether the data was provided in a compact
 579       * block message or not. If the compact block had a valid header, but contained invalid
 580       * txs, the peer should not be punished. See BIP 152.
 581       */
 582      void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
 583                                   bool via_compact_block, const std::string& message = "")
 584          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 585  
 586      /**
 587       * Potentially disconnect and discourage a node based on the contents of a TxValidationState object
 588       */
 589      void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
 590          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 591  
 592      /** Maybe disconnect a peer and discourage future connections from its address.
 593       *
 594       * @param[in]   pnode     The node to check.
 595       * @param[in]   peer      The peer object to check.
 596       * @return                True if the peer was marked for disconnection in this function
 597       */
 598      bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
 599  
 600      /** Handle a transaction whose result was not MempoolAcceptResult::ResultType::VALID.
 601       * @param[in]   first_time_failure            Whether we should consider inserting into vExtraTxnForCompact, adding
 602       *                                            a new orphan to resolve, or looking for a package to submit.
 603       *                                            Set to true for transactions just received over p2p.
 604       *                                            Set to false if the tx has already been rejected before,
 605       *                                            e.g. is already in the orphanage, to avoid adding duplicate entries.
 606       * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact.
 607       *
 608       * @returns a PackageToValidate if this transaction has a reconsiderable failure and an eligible package was found,
 609       * or std::nullopt otherwise.
 610       */
 611      std::optional<node::PackageToValidate> ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result,
 612                                                        bool first_time_failure)
 613          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
 614  
 615      /** Handle a transaction whose result was MempoolAcceptResult::ResultType::VALID.
 616       * Updates m_txrequest, m_orphanage, and vExtraTxnForCompact. Also queues the tx for relay. */
 617      void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
 618          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
 619  
 620      /** Handle the results of package validation: calls ProcessValidTx and ProcessInvalidTx for
 621       * individual transactions, and caches rejection for the package as a group.
 622       */
 623      void ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result)
 624          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
 625  
 626      /**
 627       * Reconsider orphan transactions after a parent has been accepted to the mempool.
 628       *
     * @param[in]  peer    The peer whose orphan transactions we will reconsider. Generally only
 630       *                     one orphan will be reconsidered on each call of this function. If an
 631       *                     accepted orphan has orphaned children, those will need to be
 632       *                     reconsidered, creating more work, possibly for other peers.
 633       * @return             True if meaningful work was done (an orphan was accepted/rejected).
 634       *                     If no meaningful work was done, then the work set for this peer
 635       *                     will be empty.
 636       */
 637      bool ProcessOrphanTx(Peer& peer)
 638          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, !m_tx_download_mutex);
 639  
 640      /** Process a single headers message from a peer.
 641       *
 642       * @param[in]   pfrom     CNode of the peer
 643       * @param[in]   peer      The peer sending us the headers
 644       * @param[in]   headers   The headers received. Note that this may be modified within ProcessHeadersMessage.
 645       * @param[in]   via_compact_block   Whether this header came in via compact block handling.
 646      */
 647      void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
 648                                 std::vector<CBlockHeader>&& headers,
 649                                 bool via_compact_block)
 650          EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
 651      /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */
 652      /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */
 653      bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
 654      /** Calculate an anti-DoS work threshold for headers chains */
 655      arith_uint256 GetAntiDoSWorkThreshold();
 656      /** Deal with state tracking and headers sync for peers that send
 657       * non-connecting headers (this can happen due to BIP 130 headers
 658       * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */
 659      void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 660      /** Return true if the headers connect to each other, false otherwise */
 661      bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
 662      /** Try to continue a low-work headers sync that has already begun.
 663       * Assumes the caller has already verified the headers connect, and has
 664       * checked that each header satisfies the proof-of-work target included in
 665       * the header.
 666       *  @param[in]  peer                            The peer we're syncing with.
 667       *  @param[in]  pfrom                           CNode of the peer
 668       *  @param[in,out] headers                      The headers to be processed.
 669       *  @return     True if the passed in headers were successfully processed
 670       *              as the continuation of a low-work headers sync in progress;
 671       *              false otherwise.
 672       *              If false, the passed in headers will be returned back to
 673       *              the caller.
 674       *              If true, the returned headers may be empty, indicating
 675       *              there is no more work for the caller to do; or the headers
 676       *              may be populated with entries that have passed anti-DoS
 677       *              checks (and therefore may be validated for block index
 678       *              acceptance by the caller).
 679       */
 680      bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
 681              std::vector<CBlockHeader>& headers)
 682          EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
 683      /** Check work on a headers chain to be processed, and if insufficient,
 684       * initiate our anti-DoS headers sync mechanism.
 685       *
 686       * @param[in]   peer                The peer whose headers we're processing.
 687       * @param[in]   pfrom               CNode of the peer
 688       * @param[in]   chain_start_header  Where these headers connect in our index.
 689       * @param[in,out]   headers             The headers to be processed.
 690       *
 691       * @return      True if chain was low work (headers will be empty after
 692       *              calling); false otherwise.
 693       */
 694      bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
 695                                    const CBlockIndex* chain_start_header,
 696                                    std::vector<CBlockHeader>& headers)
 697          EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
 698  
 699      /** Return true if the given header is an ancestor of
 700       *  m_chainman.m_best_header or our current tip */
 701      bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 702  
 703      /** Request further headers from this peer with a given locator.
 704       * We don't issue a getheaders message if we have a recent one outstanding.
 705       * This returns true if a getheaders is actually sent, and false otherwise.
 706       */
 707      bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 708      /** Potentially fetch blocks from this peer upon receipt of a new headers tip */
 709      void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
 710      /** Update peer state based on received headers message */
 711      void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
 712          EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 713  
 714      void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
 715  
 716      /** Send a message to a peer */
 717      void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); }
 718      template <typename... Args>
 719      void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const
 720      {
 721          m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...));
 722      }
 723  
 724      /** Send a version message to a peer */
 725      void PushNodeVersion(CNode& pnode, const Peer& peer);
 726  
 727      /** Send a ping message every PING_INTERVAL or if requested via RPC. May
 728       *  mark the peer to be disconnected if a ping has timed out.
 729       *  We use mockable time for ping timeouts, so setmocktime may cause pings
 730       *  to time out. */
 731      void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
 732  
 733      /** Send `addr` messages on a regular schedule. */
 734      void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 735  
 736      /** Send a single `sendheaders` message, after we have completed headers sync with a peer. */
 737      void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 738  
 739      /** Relay (gossip) an address to a few randomly chosen nodes.
 740       *
 741       * @param[in] originator   The id of the peer that sent us the address. We don't want to relay it back.
 742       * @param[in] addr         Address to relay.
 743       * @param[in] fReachable   Whether the address' network is reachable. We relay unreachable
 744       *                         addresses less.
 745       */
 746      void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
 747  
 748      /** Send `feefilter` message. */
 749      void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 750  
 751      FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
 752  
 753      FeeFilterRounder m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
 754  
 755      const CChainParams& m_chainparams;
 756      CConnman& m_connman;
 757      AddrMan& m_addrman;
 758      /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
 759      BanMan* const m_banman;
 760      ChainstateManager& m_chainman;
 761      CTxMemPool& m_mempool;
 762  
 763      /** Synchronizes tx download including TxRequestTracker, rejection filters, and TxOrphanage.
 764       * Lock invariants:
 765       * - A txhash (txid or wtxid) in m_txrequest is not also in m_orphanage.
 766       * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects.
 767       * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects_reconsiderable.
 768       * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_confirmed_transactions.
 769       * - Each data structure's limits hold (m_orphanage max size, m_txrequest per-peer limits, etc).
 770       */
 771      Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs);
 772      node::TxDownloadManager m_txdownloadman GUARDED_BY(m_tx_download_mutex);
 773  
 774      std::unique_ptr<TxReconciliationTracker> m_txreconciliation;
 775  
 776      /** The height of the best chain */
 777      std::atomic<int> m_best_height{-1};
 778      /** The time of the best chain tip block */
 779      std::atomic<std::chrono::seconds> m_best_block_time{0s};
 780  
 781      /** Next time to check for stale tip */
 782      std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};
 783  
 784      node::Warnings& m_warnings;
 785      TimeOffsets m_outbound_time_offsets{m_warnings};
 786  
 787      const Options m_opts;
 788  
 789      bool RejectIncomingTxs(const CNode& peer) const;
 790  
 791      /** Whether we've completed initial sync yet, for determining when to turn
 792        * on extra block-relay-only peers. */
 793      bool m_initial_sync_finished GUARDED_BY(cs_main){false};
 794  
 795      /** Protects m_peer_map. This mutex must not be locked while holding a lock
 796       *  on any of the mutexes inside a Peer object. */
 797      mutable Mutex m_peer_mutex;
 798      /**
 799       * Map of all Peer objects, keyed by peer id. This map is protected
 800       * by the m_peer_mutex. Once a shared pointer reference is
 801       * taken, the lock may be released. Individual fields are protected by
 802       * their own locks.
 803       */
 804      std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
 805  
 806      /** Map maintaining per-node state. */
 807      std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);
 808  
 809      /** Get a pointer to a const CNodeState, used when not mutating the CNodeState object. */
 810      const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 811      /** Get a pointer to a mutable CNodeState. */
 812      CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 813  
 814      uint32_t GetFetchFlags(const Peer& peer) const;
 815  
 816      std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
 817  
 818      /** Number of nodes with fSyncStarted. */
 819      int nSyncStarted GUARDED_BY(cs_main) = 0;
 820  
 821      /** Hash of the last block we received via INV */
 822      uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};
 823  
 824      /**
     * Sources of received blocks, saved to be able to punish them when processing
 826       * happens afterwards.
 827       * Set mapBlockSource[hash].second to false if the node should not be
 828       * punished if the block is invalid.
 829       */
 830      std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
 831  
 832      /** Number of peers with wtxid relay. */
 833      std::atomic<int> m_wtxid_relay_peers{0};
 834  
 835      /** Number of outbound peers with m_chain_sync.m_protect. */
 836      int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
 837  
 838      /** Number of preferable block download peers. */
 839      int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
 840  
 841      /** Stalling timeout for blocks in IBD */
 842      std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};
 843  
 844      /**
 845       * For sending `inv`s to inbound peers, we use a single (exponentially
 846       * distributed) timer for all peers. If we used a separate timer for each
 847       * peer, a spy node could make multiple inbound connections to us to
 848       * accurately determine when we received the transaction (and potentially
 849       * determine the transaction's origin). */
 850      std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
 851                                                  std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 852  
 853  
 854      // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
 855      Mutex m_most_recent_block_mutex;
 856      std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
 857      std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
 858      uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
 859      std::unique_ptr<const std::map<uint256, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);
 860  
 861      // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates.
 862      /** Mutex guarding the other m_headers_presync_* variables. */
 863      Mutex m_headers_presync_mutex;
 864      /** A type to represent statistics about a peer's low-work headers sync.
 865       *
 866       * - The first field is the total verified amount of work in that synchronization.
 867       * - The second is:
 868       *   - nullopt: the sync is in REDOWNLOAD phase (phase 2).
 869       *   - {height, timestamp}: the sync has the specified tip height and block timestamp (phase 1).
 870       */
 871      using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
 872      /** Statistics for all peers in low-work headers sync. */
 873      std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
 874      /** The peer with the most-work entry in m_headers_presync_stats. */
 875      NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1};
 876      /** The m_headers_presync_stats improved, and needs signalling. */
 877      std::atomic_bool m_headers_presync_should_signal{false};
 878  
 879      /** Height of the highest block announced using BIP 152 high-bandwidth mode. */
 880      int m_highest_fast_announce GUARDED_BY(::cs_main){0};
 881  
 882      /** Have we requested this block from a peer */
 883      bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 884  
 885      /** Have we requested this block from an outbound peer */
 886      bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
 887  
 888      /** Remove this block from our tracked requested blocks. Called if:
 889       *  - the block has been received from a peer
 890       *  - the request for the block has timed out
 891       * If "from_peer" is specified, then only remove the block if it is in
 892       * flight from that peer (to avoid one peer's network traffic from
 893       * affecting another's state).
 894       */
 895      void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 896  
 897      /* Mark a block as in flight
 898       * Returns false, still setting pit, if the block was already in flight from the same peer
 899       * pit will only be valid as long as the same cs_main lock is being held
 900       */
 901      bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 902  
 903      bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 904  
 905      /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 906       *  at most count entries.
 907       */
 908      void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 909  
 910      /** Request blocks for the background chainstate, if one is in use. */
 911      void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 912  
 913      /**
 914      * \brief Find next blocks to download from a peer after a starting block.
 915      *
 916      * \param vBlocks      Vector of blocks to download which will be appended to.
 917      * \param peer         Peer which blocks will be downloaded from.
 918      * \param state        Pointer to the state of the peer.
 919      * \param pindexWalk   Pointer to the starting block to add to vBlocks.
 920      * \param count        Maximum number of blocks to allow in vBlocks. No more
 921      *                     blocks will be added if it reaches this size.
 922      * \param nWindowEnd   Maximum height of blocks to allow in vBlocks. No
 923      *                     blocks will be added above this height.
 924      * \param activeChain  Optional pointer to a chain to compare against. If
 925      *                     provided, any next blocks which are already contained
 926      *                     in this chain will not be appended to vBlocks, but
 927      *                     instead will be used to update the
 928      *                     state->pindexLastCommonBlock pointer.
 929      * \param nodeStaller  Optional pointer to a NodeId variable that will receive
 930      *                     the ID of another peer that might be causing this peer
 931      *                     to stall. This is set to the ID of the peer which
 932      *                     first requested the first in-flight block in the
 933      *                     download window. It is only set if vBlocks is empty at
 934      *                     the end of this function call and if increasing
 935      *                     nWindowEnd by 1 would cause it to be non-empty (which
 936      *                     indicates the download might be stalled because every
 937      *                     block in the window is in flight and no other peer is
 938      *                     trying to download the next block).
 939      */
 940      void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 941  
 942      /* Multimap used to preserve insertion order */
 943      typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
 944      BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);
 945  
 946      /** When our tip was last updated. */
 947      std::atomic<std::chrono::seconds> m_last_tip_update{0s};
 948  
 949      /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */
 950      CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
 951          EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, NetEventsInterface::g_msgproc_mutex);
 952  
 953      void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
 954          EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)
 955          LOCKS_EXCLUDED(::cs_main);
 956  
 957      /** Process a new block. Perform any post-processing housekeeping */
 958      void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);
 959  
 960      /** Process compact block txns  */
 961      void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
 962          EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);
 963  
 964      /**
 965       * When a peer sends us a valid block, instruct it to announce blocks to us
 966       * using CMPCTBLOCK if possible by adding its nodeid to the end of
 967       * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
 968       * removing the first element if necessary.
 969       */
 970      void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
 971  
 972      /** Stack of nodes which we have set to announce using compact blocks */
 973      std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
 974  
 975      /** Number of peers from which we're downloading blocks. */
 976      int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
 977  
 978      void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 979  
 980      /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction.
 981       *  The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
 982       *  these are kept in a ring buffer */
 983      std::vector<CTransactionRef> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
 984      /** Offset into vExtraTxnForCompact to insert the next tx */
 985      size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;
 986  
 987      /** Check whether the last unknown block a peer advertised is not yet known. */
 988      void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 989      /** Update tracking information about which blocks a peer is assumed to have. */
 990      void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 991      bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 992  
 993      /**
 994       * Estimates the distance, in blocks, between the best-known block and the network chain tip.
 995       * Utilizes the best-block time and the chainparams blocks spacing to approximate it.
 996       */
 997      int64_t ApproximateBestBlockDepth() const;
 998  
 999      /**
1000       * To prevent fingerprinting attacks, only send blocks/headers outside of
1001       * the active chain if they are no more than a month older (both in time,
1002       * and in best equivalent proof of work) than the best header chain we know
1003       * about and we fully-validated them at some point.
1004       */
1005      bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1006      bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1007      void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
1008          EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);
1009  
1010      /**
1011       * Validation logic for compact filters request handling.
1012       *
1013       * May disconnect from the peer in the case of a bad request.
1014       *
1015       * @param[in]   node            The node that we received the request from
1016       * @param[in]   peer            The peer that we received the request from
1017       * @param[in]   filter_type     The filter type the request is for. Must be basic filters.
1018       * @param[in]   start_height    The start height for the request
1019       * @param[in]   stop_hash       The stop_hash for the request
1020       * @param[in]   max_height_diff The maximum number of items permitted to request, as specified in BIP 157
1021       * @param[out]  stop_index      The CBlockIndex for the stop_hash block, if the request can be serviced.
1022       * @param[out]  filter_index    The filter index, if the request can be serviced.
1023       * @return                      True if the request can be serviced.
1024       */
1025      bool PrepareBlockFilterRequest(CNode& node, Peer& peer,
1026                                     BlockFilterType filter_type, uint32_t start_height,
1027                                     const uint256& stop_hash, uint32_t max_height_diff,
1028                                     const CBlockIndex*& stop_index,
1029                                     BlockFilterIndex*& filter_index);
1030  
1031      /**
1032       * Handle a cfilters request.
1033       *
1034       * May disconnect from the peer in the case of a bad request.
1035       *
1036       * @param[in]   node            The node that we received the request from
1037       * @param[in]   peer            The peer that we received the request from
1038       * @param[in]   vRecv           The raw message received
1039       */
1040      void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv);
1041  
1042      /**
1043       * Handle a cfheaders request.
1044       *
1045       * May disconnect from the peer in the case of a bad request.
1046       *
1047       * @param[in]   node            The node that we received the request from
1048       * @param[in]   peer            The peer that we received the request from
1049       * @param[in]   vRecv           The raw message received
1050       */
1051      void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv);
1052  
1053      /**
1054       * Handle a getcfcheckpt request.
1055       *
1056       * May disconnect from the peer in the case of a bad request.
1057       *
1058       * @param[in]   node            The node that we received the request from
1059       * @param[in]   peer            The peer that we received the request from
1060       * @param[in]   vRecv           The raw message received
1061       */
1062      void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv);
1063  
1064      /** Checks if address relay is permitted with peer. If needed, initializes
1065       * the m_addr_known bloom filter and sets m_addr_relay_enabled to true.
1066       *
1067       *  @return   True if address relay is enabled with peer
1068       *            False if address relay is disallowed
1069       */
1070      bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1071  
1072      void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1073      void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1074  
1075      void LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block);
1076  };
1077  
1078  const CNodeState* PeerManagerImpl::State(NodeId pnode) const
1079  {
1080      std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1081      if (it == m_node_states.end())
1082          return nullptr;
1083      return &it->second;
1084  }
1085  
1086  CNodeState* PeerManagerImpl::State(NodeId pnode)
1087  {
1088      return const_cast<CNodeState*>(std::as_const(*this).State(pnode));
1089  }
1090  
1091  /**
1092   * Whether the peer supports the address. For example, a peer that does not
1093   * implement BIP155 cannot receive Tor v3 addresses because it requires
1094   * ADDRv2 (BIP155) encoding.
1095   */
1096  static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
1097  {
1098      return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
1099  }
1100  
1101  void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
1102  {
1103      assert(peer.m_addr_known);
1104      peer.m_addr_known->insert(addr.GetKey());
1105  }
1106  
1107  void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr)
1108  {
1109      // Known checking here is only to save space from duplicates.
1110      // Before sending, we'll filter it again for known addresses that were
1111      // added after addresses were pushed.
1112      assert(peer.m_addr_known);
1113      if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
1114          if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
1115              peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr;
1116          } else {
1117              peer.m_addrs_to_send.push_back(addr);
1118          }
1119      }
1120  }
1121  
1122  static void AddKnownTx(Peer& peer, const uint256& hash)
1123  {
1124      auto tx_relay = peer.GetTxRelay();
1125      if (!tx_relay) return;
1126  
1127      LOCK(tx_relay->m_tx_inventory_mutex);
1128      tx_relay->m_tx_inventory_known_filter.insert(hash);
1129  }
1130  
1131  /** Whether this peer can serve us blocks. */
1132  static bool CanServeBlocks(const Peer& peer)
1133  {
1134      return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED);
1135  }
1136  
1137  /** Whether this peer can only serve limited recent blocks (e.g. because
1138   *  it prunes old blocks) */
1139  static bool IsLimitedPeer(const Peer& peer)
1140  {
1141      return (!(peer.m_their_services & NODE_NETWORK) &&
1142               (peer.m_their_services & NODE_NETWORK_LIMITED));
1143  }
1144  
1145  /** Whether this peer can serve us witness data */
1146  static bool CanServeWitnesses(const Peer& peer)
1147  {
1148      return peer.m_their_services & NODE_WITNESS;
1149  }
1150  
std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
                                                             std::chrono::seconds average_interval)
{
    // All inbound peers share one next-send timestamp; only roll a new one
    // (exponentially distributed around average_interval) once the current
    // one has passed.
    if (m_next_inv_to_inbounds.load() < now) {
        // If this function were called from multiple threads simultaneously
        // it would be possible that both update the next send variable, and return a different result to their caller.
        // This is not possible in practice as only the net processing thread invokes this function.
        m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval);
    }
    return m_next_inv_to_inbounds;
}
1162  
1163  bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
1164  {
1165      return mapBlocksInFlight.count(hash);
1166  }
1167  
1168  bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
1169  {
1170      for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1171          auto [nodeid, block_it] = range.first->second;
1172          PeerRef peer{GetPeerRef(nodeid)};
1173          if (peer && !peer->m_is_inbound) return true;
1174      }
1175  
1176      return false;
1177  }
1178  
/** Remove in-flight tracking of `hash`. If `from_peer` is given, only that
 *  peer's request is removed; otherwise the requests from all peers are
 *  dropped. Also updates the per-peer download timing/stall bookkeeping. */
void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
{
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
        // Block was not requested from any peer
        return;
    }

    // We should not have requested too many of this block
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    while (range.first != range.second) {
        const auto& [node_id, list_it]{range.first->second};

        // Skip entries belonging to other peers when a specific peer was requested.
        if (from_peer && *from_peer != node_id) {
            range.first++;
            continue;
        }

        CNodeState& state = *Assert(State(node_id));

        if (state.vBlocksInFlight.begin() == list_it) {
            // First block on the queue was received, update the start download time for the next one
            state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
        }
        state.vBlocksInFlight.erase(list_it);

        if (state.vBlocksInFlight.empty()) {
            // Last validated block on the queue for this peer was received.
            m_peers_downloading_from--;
        }
        // The peer made progress, so clear any stalling marker.
        state.m_stalling_since = 0us;

        // erase() returns the next element, continuing the sweep over the range.
        range.first = mapBlocksInFlight.erase(range.first);
    }
}
1215  
/** Mark `block` as in flight from peer `nodeid`.
 *  Returns false if the block was already requested from this same peer (in
 *  which case *pit, if provided, is pointed at the existing queue entry);
 *  returns true after registering a new in-flight entry. */
bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
{
    const uint256& hash{block.GetBlockHash()};

    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // We should not end up with more simultaneous requests for one block
    // than compact-block relay allows.
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    // Short-circuit most stuff in case it is from the same node
    for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
        if (range.first->second.first == nodeid) {
            if (pit) {
                *pit = &range.first->second.second;
            }
            return false;
        }
    }

    // Make sure it's not being fetched already from same peer.
    RemoveBlockRequest(hash, nodeid);

    // Only allocate a PartiallyDownloadedBlock when the caller wants the
    // entry back (pit != nullptr), i.e. for compact block reconstruction.
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
    if (state->vBlocksInFlight.size() == 1) {
        // We're starting a block download (batch) from this peer.
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    }
    auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
    if (pit) {
        *pit = &itInFlight->second.second;
    }
    return true;
}
1251  
/** Consider promoting this peer to a BIP152 high-bandwidth compact-block
 *  announcer. We keep at most 3 such peers; when full, the oldest is demoted
 *  to low-bandwidth — except that we avoid demoting our last outbound
 *  high-bandwidth peer in favor of an inbound one. */
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
{
    AssertLockHeld(cs_main);

    // When in -blocksonly mode, never request high-bandwidth mode from peers. Our
    // mempool will not contain the transactions necessary to reconstruct the
    // compact block.
    if (m_opts.ignore_incoming_txs) return;

    CNodeState* nodestate = State(nodeid);
    PeerRef peer{GetPeerRef(nodeid)};
    if (!nodestate || !nodestate->m_provides_cmpctblocks) {
        // Don't request compact blocks if the peer has not signalled support
        return;
    }

    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            // Already a high-bandwidth peer: refresh its position to
            // most-recent and leave the set unchanged.
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
        PeerRef peer_ref{GetPeerRef(*it)};
        if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers;
    }
    if (peer && peer->m_is_inbound) {
        // If we're adding an inbound HB peer, make sure we're not removing
        // our last outbound HB peer in the process.
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
            PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())};
            if (remove_peer && !remove_peer->m_is_inbound) {
                // Put the HB outbound peer in the second slot, so that it
                // doesn't get removed.
                std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
            }
        }
    }
    m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
        AssertLockHeld(::cs_main);
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){
                MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
                // save BIP152 bandwidth state: we select peer to be low-bandwidth
                pnodeStop->m_bip152_highbandwidth_to = false;
                return true;
            });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION);
        // save BIP152 bandwidth state: we select peer to be high-bandwidth
        pfrom->m_bip152_highbandwidth_to = true;
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
1310  
/** Whether our tip may be stale: no tip update for longer than three target
 *  block intervals while we also have no blocks in flight. */
bool PeerManagerImpl::TipMayBeStale()
{
    AssertLockHeld(cs_main);
    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
    // First call: initialize the reference point so the age check is meaningful.
    if (m_last_tip_update.load() == 0s) {
        m_last_tip_update = GetTime<std::chrono::seconds>();
    }
    return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
}
1320  
1321  int64_t PeerManagerImpl::ApproximateBestBlockDepth() const
1322  {
1323      return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing;
1324  }
1325  
1326  bool PeerManagerImpl::CanDirectFetch()
1327  {
1328      return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1329  }
1330  
1331  static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1332  {
1333      if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
1334          return true;
1335      if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
1336          return true;
1337      return false;
1338  }
1339  
/** Check whether the last block hash a peer announced while it was unknown to
 *  us has since appeared in our block index; if so, promote it to the peer's
 *  best known block (when it has at least as much work). */
void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (!state->hashLastUnknownBlock.IsNull()) {
        const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
        // Only accept index entries that carry nonzero chain work.
        if (pindex && pindex->nChainWork > 0) {
            if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                state->pindexBestKnownBlock = pindex;
            }
            // Resolved: stop tracking the hash as unknown.
            state->hashLastUnknownBlock.SetNull();
        }
    }
}
1354  
/** Update tracking of which blocks peer `nodeid` is assumed to have, after it
 *  referenced block `hash` (e.g. in an announcement). */
void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // First settle any previously announced block that was unknown at the time.
    ProcessBlockAvailability(nodeid);

    const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
    if (pindex && pindex->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
            state->pindexBestKnownBlock = pindex;
        }
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}
1372  
// Logic for calculating which blocks to download from a given peer, given our current tip.
// Appends up to `count` block indexes (in chain order) to `vBlocks`. If nothing can be
// fetched solely because another peer has the next needed block in flight, that peer's
// id is written to `nodeStaller` (via FindNextBlocks).
void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(peer.m_id);

    // Skip peers whose best known block has less work than our tip or than
    // the minimum chain work we require.
    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
        // This peer has nothing interesting.
        return;
    }

    // When we sync with AssumeUtxo and discover the snapshot is not in the peer's best chain, abort:
    // We can't reorg to this chain due to missing undo data until the background sync has finished,
    // so downloading blocks from it would be futile.
    const CBlockIndex* snap_base{m_chainman.GetSnapshotBaseBlock()};
    if (snap_base && state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base) {
        LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id);
        return;
    }

    // Bootstrap quickly by guessing a parent of our best tip is the forking point.
    // Guessing wrong in either direction is not a problem.
    // Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download.
    if (state->pindexLastCommonBlock == nullptr ||
        (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
        state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;

    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
}
1422  
/** Select historical blocks (from `from_tip` towards `target_block`, the
 *  assumeutxo snapshot base) to download from this peer, appending them to
 *  `vBlocks` up to a total of `count`. Only peers whose best known chain
 *  includes `target_block` are used. */
void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
{
    Assert(from_tip);
    Assert(target_block);

    // Already have as many candidates as requested.
    if (vBlocks.size() >= count) {
        return;
    }

    vBlocks.reserve(count);
    CNodeState *state = Assert(State(peer.m_id));

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {
        // This peer can't provide us the complete series of blocks leading up to the
        // assumeutxo snapshot base.
        //
        // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we
        // will eventually crash when we try to reorg to it. Let other logic
        // deal with whether we disconnect this peer.
        //
        // TODO at some point in the future, we might choose to request what blocks
        // this peer does have from the historical chain, despite it not having a
        // complete history beneath the snapshot base.
        return;
    }

    // No active chain / staller output here: the window is capped at the
    // snapshot base height.
    FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight));
}
1451  
/** Shared worker for FindNextBlocksToDownload and TryDownloadingHistoricalBlocks:
 *  walk forward from `pindexWalk` along the peer's best-known chain, appending
 *  blocks we still need (not stored, not in flight) to `vBlocks`, up to `count`
 *  entries and not beyond height `nWindowEnd`. `activeChain` and `nodeStaller`
 *  are optional (null when downloading historical blocks). */
void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
{
    std::vector<const CBlockIndex*> vToFetch;
    // nWindowEnd + 1 so that we can still detect stalling (see caller comment).
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    bool is_limited_peer = IsLimitedPeer(peer);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        // Jump ahead via GetAncestor, then walk pprev links backwards to fill
        // the batch in increasing-height order.
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }

            if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }

            if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
                if (activeChain && pindex->HaveNumChainTxs()) {
                    state->pindexLastCommonBlock = pindex;
                }
                continue;
            }

            // Is block in-flight?
            if (IsBlockRequested(pindex->GetBlockHash())) {
                if (waitingfor == -1) {
                    // This is the first already-in-flight block.
                    waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
                }
                continue;
            }

            // The block is not already downloaded, and not yet in flight.
            if (pindex->nHeight > nWindowEnd) {
                // We reached the end of the window.
                if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                    // We aren't able to fetch anything, but we would be if the download window was one larger.
                    if (nodeStaller) *nodeStaller = waitingfor;
                }
                return;
            }

            // Don't request blocks that go further than what limited peers can provide
            if (is_limited_peer && (state->pindexBestKnownBlock->nHeight - pindex->nHeight >= static_cast<int>(NODE_NETWORK_LIMITED_MIN_BLOCKS) - 2 /* two blocks buffer for possible races */)) {
                continue;
            }

            vBlocks.push_back(pindex);
            if (vBlocks.size() == count) {
                return;
            }
        }
    }
}
1523  
1524  } // namespace
1525  
/** Send our VERSION message to the given peer, advertising the services we
 *  offer it, our best block height and whether we accept tx relay from it. */
void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
{
    uint64_t my_services{peer.m_our_services};
    const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
    uint64_t nonce = pnode.GetLocalNonce();
    const int nNodeStartingHeight{m_best_height};
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;

    // Only echo the peer's own address back when it is routable, not reached
    // via a proxy, and representable in the legacy V1 address encoding.
    CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
    uint64_t your_services{addr.nServices};

    const bool tx_relay{!RejectIncomingTxs(pnode)};
    MakeAndPushMessage(pnode, NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
            your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
            my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
            nonce, strSubVersion, nNodeStartingHeight, tx_relay);

    // Log the peer's address only when -logips is enabled.
    if (fLogIPs) {
        LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid);
    } else {
        LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
    }
}
1550  
1551  void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
1552  {
1553      LOCK(cs_main);
1554      CNodeState *state = State(node);
1555      if (state) state->m_last_block_announcement = time_in_seconds;
1556  }
1557  
/** Set up the per-peer state (CNodeState and Peer objects) for a newly
 *  connected node, before any messages are processed for it. */
void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services)
{
    NodeId nodeid = node.GetId();
    {
        LOCK(cs_main); // For m_node_states
        m_node_states.try_emplace(m_node_states.end(), nodeid);
    }
    // A fresh peer must have no leftover tx download state.
    WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid));

    // Peers with the BloomFilter permission are offered NODE_BLOOM regardless
    // of the services we advertise globally.
    if (NetPermissions::HasFlag(node.m_permission_flags, NetPermissionFlags::BloomFilter)) {
        our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
    }

    PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn());
    {
        LOCK(m_peer_mutex);
        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
    }
}
1577  
1578  void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
1579  {
1580      std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
1581  
1582      for (const auto& txid : unbroadcast_txids) {
1583          CTransactionRef tx = m_mempool.get(txid);
1584  
1585          if (tx != nullptr) {
1586              RelayTransaction(txid, tx->GetWitnessHash());
1587          } else {
1588              m_mempool.RemoveUnbroadcastTx(txid, true);
1589          }
1590      }
1591  
1592      // Schedule next run for 10-15 minutes in the future.
1593      // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
1594      const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
1595      scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1596  }
1597  
/** Tear down all per-peer state for a disconnected node: the Peer object,
 *  CNodeState, in-flight block requests, tx download state, and aggregate
 *  counters — with a full consistency check once the last peer is gone. */
void PeerManagerImpl::FinalizeNode(const CNode& node)
{
    NodeId nodeid = node.GetId();
    {
    LOCK(cs_main);
    {
        // We remove the PeerRef from g_peer_map here, but we don't always
        // destruct the Peer. Sometimes another thread is still holding a
        // PeerRef, so the refcount is >= 1. Be careful not to do any
        // processing here that assumes Peer won't be changed before it's
        // destructed.
        PeerRef peer = RemovePeer(nodeid);
        assert(peer != nullptr);
        m_wtxid_relay_peers -= peer->m_wtxid_relay;
        assert(m_wtxid_relay_peers >= 0);
    }
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (state->fSyncStarted)
        nSyncStarted--;

    // Drop every in-flight entry belonging to this peer, leaving other
    // peers' requests for the same blocks untouched.
    for (const QueuedBlock& entry : state->vBlocksInFlight) {
        auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
        while (range.first != range.second) {
            auto [node_id, list_it] = range.first->second;
            if (node_id != nodeid) {
                range.first++;
            } else {
                range.first = mapBlocksInFlight.erase(range.first);
            }
        }
    }
    {
        LOCK(m_tx_download_mutex);
        m_txdownloadman.DisconnectedPeer(nodeid);
    }
    if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
    // Roll this peer's contributions out of the aggregate counters.
    m_num_preferred_download_peers -= state->fPreferredDownload;
    m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
    assert(m_peers_downloading_from >= 0);
    m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
    assert(m_outbound_peers_with_protect_from_disconnect >= 0);

    m_node_states.erase(nodeid);

    if (m_node_states.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(m_num_preferred_download_peers == 0);
        assert(m_peers_downloading_from == 0);
        assert(m_outbound_peers_with_protect_from_disconnect == 0);
        assert(m_wtxid_relay_peers == 0);
        WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty());
    }
    } // cs_main
    if (node.fSuccessfullyConnected &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
        // Only change visible addrman state for full outbound peers.  We don't
        // call Connected() for feeler connections since they don't have
        // fSuccessfullyConnected set.
        m_addrman.Connected(node.addr);
    }
    {
        LOCK(m_headers_presync_mutex);
        m_headers_presync_stats.erase(nodeid);
    }
    LogDebug(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
1667  
1668  bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const
1669  {
1670      // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)
1671      return !(GetDesirableServiceFlags(services) & (~services));
1672  }
1673  
1674  ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const
1675  {
1676      if (services & NODE_NETWORK_LIMITED) {
1677          // Limited peers are desirable when we are close to the tip.
1678          if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) {
1679              return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS);
1680          }
1681      }
1682      return ServiceFlags(NODE_NETWORK | NODE_WITNESS);
1683  }
1684  
1685  PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
1686  {
1687      LOCK(m_peer_mutex);
1688      auto it = m_peer_map.find(id);
1689      return it != m_peer_map.end() ? it->second : nullptr;
1690  }
1691  
1692  PeerRef PeerManagerImpl::RemovePeer(NodeId id)
1693  {
1694      PeerRef ret;
1695      LOCK(m_peer_mutex);
1696      auto it = m_peer_map.find(id);
1697      if (it != m_peer_map.end()) {
1698          ret = std::move(it->second);
1699          m_peer_map.erase(it);
1700      }
1701      return ret;
1702  }
1703  
/** Fill `stats` from both the CNodeState (under cs_main) and the Peer object.
 *  Returns false if either has already been removed for this node id. */
bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const
{
    {
        LOCK(cs_main);
        const CNodeState* state = State(nodeid);
        if (state == nullptr)
            return false;
        stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
        for (const QueuedBlock& queue : state->vBlocksInFlight) {
            if (queue.pindex)
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) return false;
    stats.their_services = peer->m_their_services;
    stats.m_starting_height = peer->m_starting_height;
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
    // since pingtime does not update until the ping is complete, which might take a while.
    // So, if a ping is taking an unusually long time in flight,
    // the caller can immediately detect that this is happening.
    auto ping_wait{0us};
    if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
        ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    }

    // Block-relay-only peers have no tx relay struct; report neutral values.
    if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
        stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs);
        stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
    } else {
        stats.m_relay_txs = false;
        stats.m_fee_filter_received = 0;
    }

    stats.m_ping_wait = ping_wait;
    stats.m_addr_processed = peer->m_addr_processed.load();
    stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
    stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
    {
        LOCK(peer->m_headers_sync_mutex);
        if (peer->m_headers_sync) {
            stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
        }
    }
    stats.time_offset = peer->m_time_offset;

    return true;
}
1756  
1757  std::vector<TxOrphanage::OrphanTxBase> PeerManagerImpl::GetOrphanTransactions()
1758  {
1759      LOCK(m_tx_download_mutex);
1760      return m_txdownloadman.GetOrphanTransactions();
1761  }
1762  
1763  PeerManagerInfo PeerManagerImpl::GetInfo() const
1764  {
1765      return PeerManagerInfo{
1766          .median_outbound_time_offset = m_outbound_time_offsets.Median(),
1767          .ignores_incoming_txs = m_opts.ignore_incoming_txs,
1768      };
1769  }
1770  
1771  void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
1772  {
1773      if (m_opts.max_extra_txs <= 0)
1774          return;
1775      if (!vExtraTxnForCompact.size())
1776          vExtraTxnForCompact.resize(m_opts.max_extra_txs);
1777      vExtraTxnForCompact[vExtraTxnForCompactIt] = tx;
1778      vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
1779  }
1780  
1781  void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message)
1782  {
1783      LOCK(peer.m_misbehavior_mutex);
1784  
1785      const std::string message_prefixed = message.empty() ? "" : (": " + message);
1786      peer.m_should_discourage = true;
1787      LogDebug(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed);
1788      TRACEPOINT(net, misbehaving_connection,
1789          peer.m_id,
1790          message.c_str()
1791      );
1792  }
1793  
1794  void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
1795                                                bool via_compact_block, const std::string& message)
1796  {
1797      PeerRef peer{GetPeerRef(nodeid)};
1798      switch (state.GetResult()) {
1799      case BlockValidationResult::BLOCK_RESULT_UNSET:
1800          break;
1801      case BlockValidationResult::BLOCK_HEADER_LOW_WORK:
1802          // We didn't try to process the block because the header chain may have
1803          // too little work.
1804          break;
1805      // The node is providing invalid data:
1806      case BlockValidationResult::BLOCK_CONSENSUS:
1807      case BlockValidationResult::BLOCK_MUTATED:
1808          if (!via_compact_block) {
1809              if (peer) Misbehaving(*peer, message);
1810              return;
1811          }
1812          break;
1813      case BlockValidationResult::BLOCK_CACHED_INVALID:
1814          {
1815              // Discourage outbound (but not inbound) peers if on an invalid chain.
1816              // Exempt HB compact block peers. Manual connections are always protected from discouragement.
1817              if (peer && !via_compact_block && !peer->m_is_inbound) {
1818                  if (peer) Misbehaving(*peer, message);
1819                  return;
1820              }
1821              break;
1822          }
1823      case BlockValidationResult::BLOCK_INVALID_HEADER:
1824      case BlockValidationResult::BLOCK_INVALID_PREV:
1825          if (peer) Misbehaving(*peer, message);
1826          return;
1827      // Conflicting (but not necessarily invalid) data or different policy:
1828      case BlockValidationResult::BLOCK_MISSING_PREV:
1829          if (peer) Misbehaving(*peer, message);
1830          return;
1831      case BlockValidationResult::BLOCK_TIME_FUTURE:
1832          break;
1833      }
1834      if (message != "") {
1835          LogDebug(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1836      }
1837  }
1838  
void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
{
    // Map a transaction validation result onto a peer punishment decision.
    // Only consensus-invalid transactions are punishable; everything else may
    // be a policy difference or a race and is tolerated.
    PeerRef peer{GetPeerRef(nodeid)};
    switch (state.GetResult()) {
    case TxValidationResult::TX_RESULT_UNSET:
        break;
    // The node is providing invalid data:
    case TxValidationResult::TX_CONSENSUS:
        if (peer) Misbehaving(*peer, "");
        return;
    // Conflicting (but not necessarily invalid) data or different policy:
    case TxValidationResult::TX_INPUTS_NOT_STANDARD:
    case TxValidationResult::TX_NOT_STANDARD:
    case TxValidationResult::TX_MISSING_INPUTS:
    case TxValidationResult::TX_PREMATURE_SPEND:
    case TxValidationResult::TX_WITNESS_MUTATED:
    case TxValidationResult::TX_WITNESS_STRIPPED:
    case TxValidationResult::TX_CONFLICT:
    case TxValidationResult::TX_MEMPOOL_POLICY:
    case TxValidationResult::TX_NO_MEMPOOL:
    case TxValidationResult::TX_RECONSIDERABLE:
    case TxValidationResult::TX_UNKNOWN:
        break;
    }
}
1864  
1865  bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
1866  {
1867      AssertLockHeld(cs_main);
1868      if (m_chainman.ActiveChain().Contains(pindex)) return true;
1869      return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) &&
1870             (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
1871             (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
1872  }
1873  
1874  std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
1875  {
1876      if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ...";
1877  
1878      // Ensure this peer exists and hasn't been disconnected
1879      PeerRef peer = GetPeerRef(peer_id);
1880      if (peer == nullptr) return "Peer does not exist";
1881  
1882      // Ignore pre-segwit peers
1883      if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer";
1884  
1885      LOCK(cs_main);
1886  
1887      // Forget about all prior requests
1888      RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
1889  
1890      // Mark block as in-flight
1891      if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
1892  
1893      // Construct message to request the block
1894      const uint256& hash{block_index.GetBlockHash()};
1895      std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};
1896  
1897      // Send block request message to the peer
1898      bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
1899          this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs);
1900          return true;
1901      });
1902  
1903      if (!success) return "Peer not fully connected";
1904  
1905      LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n",
1906                   hash.ToString(), peer_id);
1907      return std::nullopt;
1908  }
1909  
// Factory: the only way for callers to construct the (file-local) PeerManagerImpl.
std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman,
                                               BanMan* banman, ChainstateManager& chainman,
                                               CTxMemPool& pool, node::Warnings& warnings, Options opts)
{
    return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts);
}
1916  
// Note: m_rng is initialized first and is then used to seed other members
// (m_fee_filter_rounder, m_txdownloadman), so member order matters here.
PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
                                 BanMan* banman, ChainstateManager& chainman,
                                 CTxMemPool& pool, node::Warnings& warnings, Options opts)
    : m_rng{opts.deterministic_rng},
      m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng},
      m_chainparams(chainman.GetParams()),
      m_connman(connman),
      m_addrman(addrman),
      m_banman(banman),
      m_chainman(chainman),
      m_mempool(pool),
      m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.max_orphan_txs, opts.deterministic_rng}),
      m_warnings{warnings},
      m_opts{opts}
{
    // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
    // This argument can go away after Erlay support is complete.
    if (opts.reconcile_txs) {
        m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION);
    }
}
1938  
void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
{
    // Stale tip checking and peer eviction are on two different timers, but we
    // don't want them to get out of sync due to drift in the scheduler, so we
    // combine them in one function and schedule at the quicker (peer-eviction)
    // timer.
    static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
    scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});

    // schedule next run for 10-15 minutes in the future
    // NOTE(review): the lambda captures `scheduler` by reference and runs
    // later on the scheduler thread — relies on the scheduler outliving this
    // task (and on ReattemptInitialBroadcast rescheduling itself).
    const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}
1952  
1953  void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd)
1954  {
1955      // Ensure mempool mutex was released, otherwise deadlock may occur if another thread holding
1956      // m_tx_download_mutex waits on the mempool mutex.
1957      AssertLockNotHeld(m_mempool.cs);
1958      AssertLockNotHeld(m_tx_download_mutex);
1959  
1960      if (!is_ibd) {
1961          LOCK(m_tx_download_mutex);
1962          // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due
1963          // to a timelock. Reset the rejection filters to give those transactions another chance if we
1964          // see them again.
1965          m_txdownloadman.ActiveTipChange();
1966      }
1967  }
1968  
1969  /**
1970   * Evict orphan txn pool entries based on a newly connected
1971   * block, remember the recently confirmed transactions, and delete tracked
1972   * announcements for them. Also save the time of the last tip update and
1973   * possibly reduce dynamic block stalling timeout.
1974   */
1975  void PeerManagerImpl::BlockConnected(
1976      ChainstateRole role,
1977      const std::shared_ptr<const CBlock>& pblock,
1978      const CBlockIndex* pindex)
1979  {
1980      // Update this for all chainstate roles so that we don't mistakenly see peers
1981      // helping us do background IBD as having a stale tip.
1982      m_last_tip_update = GetTime<std::chrono::seconds>();
1983  
1984      // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
1985      auto stalling_timeout = m_block_stalling_timeout.load();
1986      Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
1987      if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
1988          const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
1989          if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
1990              LogDebug(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
1991          }
1992      }
1993  
1994      // The following task can be skipped since we don't maintain a mempool for
1995      // the ibd/background chainstate.
1996      if (role == ChainstateRole::BACKGROUND) {
1997          return;
1998      }
1999      LOCK(m_tx_download_mutex);
2000      m_txdownloadman.BlockConnected(pblock);
2001  }
2002  
2003  void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
2004  {
2005      LOCK(m_tx_download_mutex);
2006      m_txdownloadman.BlockDisconnected();
2007  }
2008  
2009  /**
2010   * Maintain state about the best-seen block and fast-announce a compact block
2011   * to compatible peers.
2012   */
2013  void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
2014  {
2015      auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64());
2016  
2017      LOCK(cs_main);
2018  
2019      if (pindex->nHeight <= m_highest_fast_announce)
2020          return;
2021      m_highest_fast_announce = pindex->nHeight;
2022  
2023      if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return;
2024  
2025      uint256 hashBlock(pblock->GetHash());
2026      const std::shared_future<CSerializedNetMsg> lazy_ser{
2027          std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })};
2028  
2029      {
2030          auto most_recent_block_txs = std::make_unique<std::map<uint256, CTransactionRef>>();
2031          for (const auto& tx : pblock->vtx) {
2032              most_recent_block_txs->emplace(tx->GetHash(), tx);
2033              most_recent_block_txs->emplace(tx->GetWitnessHash(), tx);
2034          }
2035  
2036          LOCK(m_most_recent_block_mutex);
2037          m_most_recent_block_hash = hashBlock;
2038          m_most_recent_block = pblock;
2039          m_most_recent_compact_block = pcmpctblock;
2040          m_most_recent_block_txs = std::move(most_recent_block_txs);
2041      }
2042  
2043      m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
2044          AssertLockHeld(::cs_main);
2045  
2046          if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
2047              return;
2048          ProcessBlockAvailability(pnode->GetId());
2049          CNodeState &state = *State(pnode->GetId());
2050          // If the peer has, or we announced to them the previous block already,
2051          // but we don't think they have this one, go ahead and announce it
2052          if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
2053  
2054              LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
2055                      hashBlock.ToString(), pnode->GetId());
2056  
2057              const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()};
2058              PushMessage(*pnode, ser_cmpctblock.Copy());
2059              state.pindexBestHeaderSent = pindex;
2060          }
2061      });
2062  }
2063  
2064  /**
2065   * Update our best height and announce any block hashes which weren't previously
2066   * in m_chainman.ActiveChain() to our peers.
2067   */
2068  void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
2069  {
2070      SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()});
2071  
2072      // Don't relay inventory during initial block download.
2073      if (fInitialDownload) return;
2074  
2075      // Find the hashes of all blocks that weren't previously in the best chain.
2076      std::vector<uint256> vHashes;
2077      const CBlockIndex *pindexToAnnounce = pindexNew;
2078      while (pindexToAnnounce != pindexFork) {
2079          vHashes.push_back(pindexToAnnounce->GetBlockHash());
2080          pindexToAnnounce = pindexToAnnounce->pprev;
2081          if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
2082              // Limit announcements in case of a huge reorganization.
2083              // Rely on the peer's synchronization mechanism in that case.
2084              break;
2085          }
2086      }
2087  
2088      {
2089          LOCK(m_peer_mutex);
2090          for (auto& it : m_peer_map) {
2091              Peer& peer = *it.second;
2092              LOCK(peer.m_block_inv_mutex);
2093              for (const uint256& hash : vHashes | std::views::reverse) {
2094                  peer.m_blocks_for_headers_relay.push_back(hash);
2095              }
2096          }
2097      }
2098  
2099      m_connman.WakeMessageHandler();
2100  }
2101  
2102  /**
2103   * Handle invalid block rejection and consequent peer discouragement, maintain which
2104   * peers announce compact blocks.
2105   */
2106  void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state)
2107  {
2108      LOCK(cs_main);
2109  
2110      const uint256 hash(block.GetHash());
2111      std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
2112  
2113      // If the block failed validation, we know where it came from and we're still connected
2114      // to that peer, maybe punish.
2115      if (state.IsInvalid() &&
2116          it != mapBlockSource.end() &&
2117          State(it->second.first)) {
2118              MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
2119      }
2120      // Check that:
2121      // 1. The block is valid
2122      // 2. We're not in initial block download
2123      // 3. This is currently the best block we're aware of. We haven't updated
2124      //    the tip yet so we have no way to check this directly here. Instead we
2125      //    just check that there are currently no other blocks in flight.
2126      else if (state.IsValid() &&
2127               !m_chainman.IsInitialBlockDownload() &&
2128               mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
2129          if (it != mapBlockSource.end()) {
2130              MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
2131          }
2132      }
2133      if (it != mapBlockSource.end())
2134          mapBlockSource.erase(it);
2135  }
2136  
2137  //////////////////////////////////////////////////////////////////////////////
2138  //
2139  // Messages
2140  //
2141  
2142  bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
2143  {
2144      return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
2145  }
2146  
2147  void PeerManagerImpl::SendPings()
2148  {
2149      LOCK(m_peer_mutex);
2150      for(auto& it : m_peer_map) it.second->m_ping_queued = true;
2151  }
2152  
2153  void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
2154  {
2155      LOCK(m_peer_mutex);
2156      for(auto& it : m_peer_map) {
2157          Peer& peer = *it.second;
2158          auto tx_relay = peer.GetTxRelay();
2159          if (!tx_relay) continue;
2160  
2161          LOCK(tx_relay->m_tx_inventory_mutex);
2162          // Only queue transactions for announcement once the version handshake
2163          // is completed. The time of arrival for these transactions is
2164          // otherwise at risk of leaking to a spy, if the spy is able to
2165          // distinguish transactions received during the handshake from the rest
2166          // in the announcement.
2167          if (tx_relay->m_next_inv_send_time == 0s) continue;
2168  
2169          const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
2170          if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
2171              tx_relay->m_tx_inventory_to_send.insert(hash);
2172          }
2173      };
2174  }
2175  
void PeerManagerImpl::RelayAddress(NodeId originator,
                                   const CAddress& addr,
                                   bool fReachable)
{
    // We choose the same nodes within a given 24h window (if the list of connected
    // nodes does not change) and we don't relay to nodes that already know an
    // address. So within 24h we will likely relay a given address once. This is to
    // prevent a peer from unjustly giving their address better propagation by sending
    // it to us repeatedly.

    if (!fReachable && !addr.IsRelayable()) return;

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the m_addr_knowns of the chosen nodes prevent repeats
    const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
    const auto current_time{GetTime<std::chrono::seconds>()};
    // Adding address hash makes exact rotation time different per address, while preserving periodicity.
    const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
    const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
                                .Write(hash_addr)
                                .Write(time_addr)};

    // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;

    // Track the nRelayNodes peers with the highest per-peer hash keys
    // (a deterministic "lottery": best[0] is the highest so far).
    std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    LOCK(m_peer_mutex);

    for (auto& [id, peer] : m_peer_map) {
        // Candidate peers: addr-relay enabled, not the originator, and able to
        // handle this address's network/format.
        if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
            uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                 if (hashKey > best[i].first) {
                     // Shift lower-ranked entries down, then insert at slot i.
                     std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                     best[i] = std::make_pair(hashKey, peer.get());
                     break;
                 }
            }
        }
    };

    // Push the address to the selected peers (slots with key 0 were never filled).
    for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
        PushAddress(*best[i].second, addr);
    }
}
2224  
// Serve a single block-type GETDATA item (BLOCK / WITNESS_BLOCK / FILTERED /
// CMPCTBLOCK) to the requesting peer, enforcing the various relay limits.
void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
{
    // Take a snapshot of the most recent block/compact block under the lock so
    // the rest of the function can run without holding it.
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    {
        LOCK(m_most_recent_block_mutex);
        a_recent_block = m_most_recent_block;
        a_recent_compact_block = m_most_recent_compact_block;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        BlockValidationState state;
        if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
            LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
        }
    }

    // Re-check relay conditions under cs_main and capture everything needed to
    // serve the block after the lock is released.
    const CBlockIndex* pindex{nullptr};
    const CBlockIndex* tip{nullptr};
    bool can_direct_fetch{false};
    FlatFilePos block_pos{};
    {
        LOCK(cs_main);
        pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (!pindex) {
            return;
        }
        if (!BlockRequestAllowed(pindex)) {
            LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
            return;
        }
        // disconnect node in case we have reached the outbound limit for serving historical blocks
        if (m_connman.OutboundTargetReached(true) &&
            (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
            !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
        ) {
            LogDebug(BCLog::NET, "historical block serving limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }
        tip = m_chainman.ActiveChain().Tip();
        // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
        if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
                (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
           )) {
            LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, %s\n", pfrom.DisconnectMsg(fLogIPs));
            //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
            pfrom.fDisconnect = true;
            return;
        }
        // Pruned nodes may have deleted the block, so check whether
        // it's available before trying to send.
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
            return;
        }
        can_direct_fetch = CanDirectFetch();
        block_pos = pindex->GetBlockPos();
    }

    // Obtain the block to serve: prefer the in-memory most-recent block, then
    // (for witness requests) the raw on-disk bytes, then a full disk read.
    std::shared_ptr<const CBlock> pblock;
    if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
        pblock = a_recent_block;
    } else if (inv.IsMsgWitnessBlk()) {
        // Fast-path: in this case it is possible to serve the block directly from disk,
        // as the network format matches the format on disk
        std::vector<uint8_t> block_data;
        if (!m_chainman.m_blockman.ReadRawBlock(block_data, block_pos)) {
            if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
                LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs));
            } else {
                LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs));
            }
            pfrom.fDisconnect = true;
            return;
        }
        MakeAndPushMessage(pfrom, NetMsgType::BLOCK, std::span{block_data});
        // Don't set pblock as we've sent the block
    } else {
        // Send block from disk
        std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
        if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos)) {
            if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
                LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs));
            } else {
                LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs));
            }
            pfrom.fDisconnect = true;
            return;
        }
        pblock = pblockRead;
    }
    // Serve the block in whichever format the request asked for.
    if (pblock) {
        if (inv.IsMsgBlk()) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock));
        } else if (inv.IsMsgWitnessBlk()) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
        } else if (inv.IsMsgFilteredBlk()) {
            bool sendMerkleBlock = false;
            CMerkleBlock merkleBlock;
            if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
                LOCK(tx_relay->m_bloom_filter_mutex);
                if (tx_relay->m_bloom_filter) {
                    sendMerkleBlock = true;
                    merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
                }
            }
            if (sendMerkleBlock) {
                MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
                // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                // This avoids hurting performance by pointlessly requiring a round-trip
                // Note that there is currently no way for a node to request any single transactions we didn't send here -
                // they must either disconnect and retry or request the full block.
                // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                // however we MUST always provide at least what the remote peer needs
                typedef std::pair<unsigned int, uint256> PairType;
                for (PairType& pair : merkleBlock.vMatchedTxn)
                    MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[pair.first]));
            }
            // else
            // no response
        } else if (inv.IsMsgCmpctBlk()) {
            // If a peer is asking for old blocks, we're almost guaranteed
            // they won't have a useful mempool to match against a compact block,
            // and we don't feel like constructing the object for them, so
            // instead we respond with the full, non-compact block.
            if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) {
                if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                    MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block);
                } else {
                    CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()};
                    MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock);
                }
            } else {
                MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
            }
        }
    }

    {
        LOCK(peer.m_block_inv_mutex);
        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == peer.m_continuation_block) {
            // Send immediately. This must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash());
            MakeAndPushMessage(pfrom, NetMsgType::INV, vInv);
            peer.m_continuation_block.SetNull();
        }
    }
}
2393  
2394  CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
2395  {
2396      // If a tx was in the mempool prior to the last INV for this peer, permit the request.
2397      auto txinfo = m_mempool.info_for_relay(gtxid, tx_relay.m_last_inv_sequence);
2398      if (txinfo.tx) {
2399          return std::move(txinfo.tx);
2400      }
2401  
2402      // Or it might be from the most recent block
2403      {
2404          LOCK(m_most_recent_block_mutex);
2405          if (m_most_recent_block_txs != nullptr) {
2406              auto it = m_most_recent_block_txs->find(gtxid.GetHash());
2407              if (it != m_most_recent_block_txs->end()) return it->second;
2408          }
2409      }
2410  
2411      return {};
2412  }
2413  
/** Serve the GETDATA requests queued in peer.m_getdata_requests.
 *
 * Transaction items at the front of the queue are processed in a batch; at
 * most one block-type item is handled per call (blocks are expensive to
 * serve). Unknown-type items are dropped. Requests we cannot satisfy are
 * reported back to the peer in a single NOTFOUND message.
 */
void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
{
    AssertLockNotHeld(cs_main);

    // nullptr if this peer does not participate in transaction relay
    // (checked per-item below, so block items are still served).
    auto tx_relay = peer.GetTxRelay();

    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;

    // Process as many TX items from the front of the getdata queue as
    // possible, since they're common and it's efficient to batch process
    // them.
    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
        if (interruptMsgProc) return;
        // The send buffer provides backpressure. If there's no space in
        // the buffer, pause processing until the next call.
        if (pfrom.fPauseSend) break;

        const CInv &inv = *it++;

        if (tx_relay == nullptr) {
            // Ignore GETDATA requests for transactions from block-relay-only
            // peers and peers that asked us not to announce transactions.
            continue;
        }

        CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv));
        if (tx) {
            // WTX and WITNESS_TX imply we serialize with witness
            const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS);
            MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx));
            // The peer now has this transaction; no need to keep trying to
            // broadcast it ourselves.
            m_mempool.RemoveUnbroadcastTx(tx->GetHash());
        } else {
            vNotFound.push_back(inv);
        }
    }

    // Only process one BLOCK item per call, since they're uncommon and can be
    // expensive to process.
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
        if (inv.IsGenBlkMsg()) {
            ProcessGetBlockData(pfrom, peer, inv);
        }
        // else: If the first item on the queue is an unknown type, we erase it
        // and continue processing the queue on the next call.
        // NOTE: previously we wouldn't do so and the peer sending us a malformed GETDATA could
        // result in never making progress and this thread using 100% allocated CPU. See
        // https://bitcoincore.org/en/2024/07/03/disclose-getdata-cpu.
    }

    // Drop everything we consumed (everything before `it`) from the queue;
    // remaining items are handled on a later call.
    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever.
        // SPV clients care about this message: it's needed when they are
        // recursively walking the dependencies of relevant unconfirmed
        // transactions. SPV clients want to do that because they want to know
        // about (and store and rebroadcast and risk analyze) the dependencies
        // of transactions relevant to them, without having to download the
        // entire memory pool.
        // Also, other nodes can use these messages to automatically request a
        // transaction from some other peer that announced it, and stop
        // waiting for us to respond.
        // In normal operation, we often send NOTFOUND messages for parents of
        // transactions that we relay; if a peer is missing a parent, they may
        // assume we have them and request the parents from us.
        MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
    }
}
2485  
2486  uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
2487  {
2488      uint32_t nFetchFlags = 0;
2489      if (CanServeWitnesses(peer)) {
2490          nFetchFlags |= MSG_WITNESS_FLAG;
2491      }
2492      return nFetchFlags;
2493  }
2494  
2495  void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
2496  {
2497      BlockTransactions resp(req);
2498      for (size_t i = 0; i < req.indexes.size(); i++) {
2499          if (req.indexes[i] >= block.vtx.size()) {
2500              Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
2501              return;
2502          }
2503          resp.txn[i] = block.vtx[req.indexes[i]];
2504      }
2505  
2506      MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp);
2507  }
2508  
2509  bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
2510  {
2511      // Do these headers have proof-of-work matching what's claimed?
2512      if (!HasValidProofOfWork(headers, consensusParams)) {
2513          Misbehaving(peer, "header with invalid proof of work");
2514          return false;
2515      }
2516  
2517      // Are these headers connected to each other?
2518      if (!CheckHeadersAreContinuous(headers)) {
2519          Misbehaving(peer, "non-continuous headers sequence");
2520          return false;
2521      }
2522      return true;
2523  }
2524  
2525  arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
2526  {
2527      arith_uint256 near_chaintip_work = 0;
2528      LOCK(cs_main);
2529      if (m_chainman.ActiveChain().Tip() != nullptr) {
2530          const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
2531          // Use a 144 block buffer, so that we'll accept headers that fork from
2532          // near our tip.
2533          near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
2534      }
2535      return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
2536  }
2537  
2538  /**
2539   * Special handling for unconnecting headers that might be part of a block
2540   * announcement.
2541   *
2542   * We'll send a getheaders message in response to try to connect the chain.
2543   */
2544  void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer,
2545          const std::vector<CBlockHeader>& headers)
2546  {
2547      // Try to fill in the missing headers.
2548      const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
2549      if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
2550          LogDebug(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n",
2551              headers[0].GetHash().ToString(),
2552              headers[0].hashPrevBlock.ToString(),
2553              best_header->nHeight,
2554              pfrom.GetId());
2555      }
2556  
2557      // Set hashLastUnknownBlock for this peer, so that if we
2558      // eventually get the headers - even from a different peer -
2559      // we can use this peer to download.
2560      WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
2561  }
2562  
2563  bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
2564  {
2565      uint256 hashLastBlock;
2566      for (const CBlockHeader& header : headers) {
2567          if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
2568              return false;
2569          }
2570          hashLastBlock = header.GetHash();
2571      }
2572      return true;
2573  }
2574  
/** Continue an in-progress low-work headers sync with this peer, if one exists.
 *
 * Feeds the received headers into the peer's HeadersSyncState (PRESYNC or
 * REDOWNLOAD phase), requests more headers when instructed to, and maintains
 * the per-peer presync statistics used to select the best presyncing peer.
 *
 * On success, `headers` is swapped with whatever headers (possibly none) are
 * now ready for full validation.
 *
 * @return true if the headers were consumed by the sync logic (the caller
 *         treats them as already work-validated); false if no sync was in
 *         progress or processing failed.
 */
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
{
    if (peer.m_headers_sync) {
        // A full-sized message signals that the peer may have more headers for us.
        auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result);
        // If it is a valid continuation, we should treat the existing getheaders request as responded to.
        if (result.success) peer.m_last_getheaders_timestamp = {};
        if (result.request_more) {
            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
            // If we were instructed to ask for a locator, it should not be empty.
            Assume(!locator.vHave.empty());
            // We can only be instructed to request more if processing was successful.
            Assume(result.success);
            if (!locator.vHave.empty()) {
                // It should be impossible for the getheaders request to fail,
                // because we just cleared the last getheaders timestamp.
                bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
                Assume(sent_getheaders);
                LogDebug(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
                    locator.vHave.front().ToString(), pfrom.GetId());
            }
        }

        if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
            // Sync is complete (or aborted); drop the state object.
            peer.m_headers_sync.reset(nullptr);

            // Delete this peer's entry in m_headers_presync_stats.
            // If this is m_headers_presync_bestpeer, it will be replaced later
            // by the next peer that triggers the else{} branch below.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        } else {
            // Build statistics for this peer's sync.
            HeadersPresyncStats stats;
            stats.first = peer.m_headers_sync->GetPresyncWork();
            if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) {
                stats.second = {peer.m_headers_sync->GetPresyncHeight(),
                                peer.m_headers_sync->GetPresyncTime()};
            }

            // Update statistics in stats.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats[pfrom.GetId()] = stats;
            auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
            bool best_updated = false;
            if (best_it == m_headers_presync_stats.end()) {
                // If the cached best peer is outdated, iterate over all remaining ones (including
                // newly updated one) to find the best one.
                NodeId peer_best{-1};
                const HeadersPresyncStats* stat_best{nullptr};
                for (const auto& [peer, stat] : m_headers_presync_stats) {
                    if (!stat_best || stat > *stat_best) {
                        peer_best = peer;
                        stat_best = &stat;
                    }
                }
                m_headers_presync_bestpeer = peer_best;
                best_updated = (peer_best == pfrom.GetId());
            } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
                // pfrom was and remains the best peer, or pfrom just became best.
                m_headers_presync_bestpeer = pfrom.GetId();
                best_updated = true;
            }
            if (best_updated && stats.second.has_value()) {
                // If the best peer updated, and it is in its first phase, signal.
                m_headers_presync_should_signal = true;
            }
        }

        if (result.success) {
            // We only overwrite the headers passed in if processing was
            // successful.
            headers.swap(result.pow_validated_headers);
        }

        return result.success;
    }
    // Either we didn't have a sync in progress, or something went wrong
    // processing these headers, or we are returning headers to the caller to
    // process.
    return false;
}
2656  
/** Check whether a connecting headers chain claims too little work to process
 * directly, and if so, handle it here instead of letting the caller proceed.
 *
 * When the claimed total work is below the anti-DoS threshold and the headers
 * message was full, a HeadersSyncState is created for the peer and the
 * headers are fed through it; a below-threshold non-full message is ignored.
 *
 * @return true if the headers were handled here (`headers` is cleared and the
 *         caller must not process them further); false if the chain meets the
 *         work threshold and normal processing should continue.
 */
bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
{
    // Calculate the claimed total work on this chain.
    arith_uint256 total_work = chain_start_header->nChainWork + CalculateClaimedHeadersWork(headers);

    // Our dynamic anti-DoS threshold (minimum work required on a headers chain
    // before we'll store it)
    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();

    // Avoid DoS via low-difficulty-headers by only processing if the headers
    // are part of a chain with sufficient work.
    if (total_work < minimum_chain_work) {
        // Only try to sync with this peer if their headers message was full;
        // otherwise they don't have more headers after this so no point in
        // trying to sync their too-little-work chain.
        if (headers.size() == m_opts.max_headers_result) {
            // Note: we could advance to the last header in this set that is
            // known to us, rather than starting at the first header (which we
            // may already have); however this is unlikely to matter much since
            // ProcessHeadersMessage() already handles the case where all
            // headers in a received message are already known and are
            // ancestors of m_best_header or chainActive.Tip(), by skipping
            // this logic in that case. So even if the first header in this set
            // of headers is known, some header in this set must be new, so
            // advancing to the first unknown header would be a small effect.
            LOCK(peer.m_headers_sync_mutex);
            peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
                chain_start_header, minimum_chain_work));

            // Now a HeadersSyncState object for tracking this synchronization
            // is created, process the headers using it as normal. Failures are
            // handled inside of IsContinuationOfLowWorkHeadersSync.
            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
        } else {
            LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
        }

        // The peer has not yet given us a chain that meets our work threshold,
        // so we want to prevent further processing of the headers in any case.
        headers = {};
        return true;
    }

    return false;
}
2702  
2703  bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
2704  {
2705      if (header == nullptr) {
2706          return false;
2707      } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {
2708          return true;
2709      } else if (m_chainman.ActiveChain().Contains(header)) {
2710          return true;
2711      }
2712      return false;
2713  }
2714  
2715  bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
2716  {
2717      const auto current_time = NodeClock::now();
2718  
2719      // Only allow a new getheaders message to go out if we don't have a recent
2720      // one already in-flight
2721      if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) {
2722          MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256());
2723          peer.m_last_getheaders_timestamp = current_time;
2724          return true;
2725      }
2726      return false;
2727  }
2728  
2729  /*
2730   * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
2731   * We require that the given tip have at least as much work as our tip, and for
2732   * our current tip to be "close to synced" (see CanDirectFetch()).
2733   */
void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
{
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    // Only direct-fetch if we're close to caught up, the header chain is at
    // least TREE-valid, and it has at least as much work as our current tip.
    if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
        std::vector<const CBlockIndex*> vToFetch;
        const CBlockIndex* pindexWalk{&last_header};
        // Calculate all the blocks we'd need to switch to last_header, up to a limit.
        // NOTE(review): the `<=` bound allows collecting up to
        // MAX_BLOCKS_IN_TRANSIT_PER_PEER + 1 candidates; the in-flight check
        // in the request loop below still caps actual requests - confirm the
        // off-by-one is intended.
        while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                    !IsBlockRequested(pindexWalk->GetBlockHash()) &&
                    (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) {
                // We don't have this block, and it's not yet in flight.
                vToFetch.push_back(pindexWalk);
            }
            pindexWalk = pindexWalk->pprev;
        }
        // If pindexWalk still isn't on our main chain, we're looking at a
        // very large reorg at a time we think we're close to caught up to
        // the main chain -- this shouldn't really happen.  Bail out on the
        // direct fetch and rely on parallel download instead.
        if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
            LogDebug(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                     last_header.GetBlockHash().ToString(),
                     last_header.nHeight);
        } else {
            std::vector<CInv> vGetData;
            // Download as much as possible, from earliest to latest.
            // (vToFetch was filled tip-first, so iterate it in reverse.)
            for (const CBlockIndex* pindex : vToFetch | std::views::reverse) {
                if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    // Can't download any more from this peer
                    break;
                }
                uint32_t nFetchFlags = GetFetchFlags(peer);
                vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
                BlockRequested(pfrom.GetId(), *pindex);
                LogDebug(BCLog::NET, "Requesting block %s from  peer=%d\n",
                        pindex->GetBlockHash().ToString(), pfrom.GetId());
            }
            if (vGetData.size() > 1) {
                LogDebug(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
                         last_header.GetBlockHash().ToString(),
                         last_header.nHeight);
            }
            if (vGetData.size() > 0) {
                // If this is the lone block being requested anywhere, and the
                // peer supports compact blocks, request it as one.
                if (!m_opts.ignore_incoming_txs &&
                        nodestate->m_provides_cmpctblocks &&
                        vGetData.size() == 1 &&
                        mapBlocksInFlight.size() == 1 &&
                        last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
                    // In any case, we want to download using a compact block, not a regular one
                    vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                }
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData);
            }
        }
    }
}
2793  
2794  /**
2795   * Given receipt of headers from a peer ending in last_header, along with
2796   * whether that header was new and whether the headers message was full,
2797   * update the state we keep for the peer.
2798   */
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
{
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());

    // From here, pindexBestKnownBlock should be guaranteed to be non-null,
    // because it is set in UpdateBlockAvailability. Some nullptr checks
    // are still present, however, as belt-and-suspenders.

    // Record an announcement time if the peer gave us a new header with more
    // work than our current tip (used by outbound-eviction logic).
    if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
        nodestate->m_last_block_announcement = GetTime();
    }

    // If we're in IBD, we want outbound peers that will serve us a useful
    // chain. Disconnect peers that are on chains with insufficient work.
    if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
        // If the peer has no more headers to give us, then we know we have
        // their tip.
        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
            // This peer has too little work on their headers chain to help
            // us sync -- disconnect if it is an outbound disconnection
            // candidate.
            // Note: We compare their tip to the minimum chain work (rather than
            // m_chainman.ActiveChain().Tip()) because we won't start block download
            // until we have a headers chain that has at least
            // the minimum chain work, even if a peer has a chain past our tip,
            // as an anti-DoS measure.
            if (pfrom.IsOutboundOrBlockRelayConn()) {
                LogInfo("outbound peer headers chain has insufficient work, %s\n", pfrom.DisconnectMsg(fLogIPs));
                pfrom.fDisconnect = true;
            }
        }
    }

    // If this is an outbound full-relay peer, check to see if we should protect
    // it from the bad/lagging chain logic.
    // Note that outbound block-relay peers are excluded from this protection, and
    // thus always subject to eviction under the bad/lagging chain logic.
    // See ChainSyncTimeoutState.
    if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
        // Protect up to MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT peers
        // whose known chain has at least as much work as our tip.
        if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
            LogDebug(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
            nodestate->m_chain_sync.m_protect = true;
            ++m_outbound_peers_with_protect_from_disconnect;
        }
    }
}
2849  
/** Handle a batch of headers received from a peer (via a headers message or
 * extracted from a compact block announcement).
 *
 * Performs PoW/continuity sanity checks, feeds the headers into any
 * in-progress low-work headers sync, applies the anti-DoS work-threshold
 * logic, and finally validates/stores the headers - potentially requesting
 * more headers and/or directly fetching the announced blocks.
 *
 * @param[in] via_compact_block whether the headers arrived via a cmpctblock
 *            message (passed through to MaybePunishNodeForBlock)
 */
void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
                                            std::vector<CBlockHeader>&& headers,
                                            bool via_compact_block)
{
    size_t nCount = headers.size();

    if (nCount == 0) {
        // Nothing interesting. Stop asking this peer for more headers.
        // If we were in the middle of headers sync, receiving an empty headers
        // message suggests that the peer suddenly has nothing to give us
        // (perhaps it reorged to our chain). Clear download state for this peer.
        LOCK(peer.m_headers_sync_mutex);
        if (peer.m_headers_sync) {
            peer.m_headers_sync.reset(nullptr);
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        }
        // A headers message with no headers cannot be an announcement, so assume
        // it is a response to our last getheaders request, if there is one.
        peer.m_last_getheaders_timestamp = {};
        return;
    }

    // Before we do any processing, make sure these pass basic sanity checks.
    // We'll rely on headers having valid proof-of-work further down, as an
    // anti-DoS criteria (note: this check is required before passing any
    // headers into HeadersSyncState).
    if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
        // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
        // just return. (Note that even if a header is announced via compact
        // block, the header itself should be valid, so this type of error can
        // always be punished.)
        return;
    }

    const CBlockIndex *pindexLast = nullptr;

    // We'll set already_validated_work to true if these headers are
    // successfully processed as part of a low-work headers sync in progress
    // (either in PRESYNC or REDOWNLOAD phase).
    // If true, this will mean that any headers returned to us (ie during
    // REDOWNLOAD) can be validated without further anti-DoS checks.
    bool already_validated_work = false;

    // If we're in the middle of headers sync, let it do its magic.
    bool have_headers_sync = false;
    {
        LOCK(peer.m_headers_sync_mutex);

        already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);

        // The headers we passed in may have been:
        // - untouched, perhaps if no headers-sync was in progress, or some
        //   failure occurred
        // - erased, such as if the headers were successfully processed and no
        //   additional headers processing needs to take place (such as if we
        //   are still in PRESYNC)
        // - replaced with headers that are now ready for validation, such as
        //   during the REDOWNLOAD phase of a low-work headers sync.
        // So just check whether we still have headers that we need to process,
        // or not.
        if (headers.empty()) {
            return;
        }

        have_headers_sync = !!peer.m_headers_sync;
    }

    // Do these headers connect to something in our block index?
    const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};
    bool headers_connect_blockindex{chain_start_header != nullptr};

    if (!headers_connect_blockindex) {
        // This could be a BIP 130 block announcement, use
        // special logic for handling headers that don't connect, as this
        // could be benign.
        HandleUnconnectingHeaders(pfrom, peer, headers);
        return;
    }

    // If headers connect, assume that this is in response to any outstanding getheaders
    // request we may have sent, and clear out the time of our last request. Non-connecting
    // headers cannot be a response to a getheaders request.
    peer.m_last_getheaders_timestamp = {};

    // If the headers we received are already in memory and an ancestor of
    // m_best_header or our tip, skip anti-DoS checks. These headers will not
    // use any more memory (and we are not leaking information that could be
    // used to fingerprint us).
    const CBlockIndex *last_received_header{nullptr};
    {
        LOCK(cs_main);
        last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
        if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
            already_validated_work = true;
        }
    }

    // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
    // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
    // on startup).
    if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
        already_validated_work = true;
    }

    // At this point, the headers connect to something in our block index.
    // Do anti-DoS checks to determine if we should process or store for later
    // processing.
    if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
                chain_start_header, headers)) {
        // If we successfully started a low-work headers sync, then there
        // should be no headers to process any further.
        Assume(headers.empty());
        return;
    }

    // At this point, we have a set of headers with sufficient work on them
    // which can be processed.

    // If we don't have the last header, then this peer will have given us
    // something new (if these headers are valid).
    bool received_new_header{last_received_header == nullptr};

    // Now process all the headers.
    BlockValidationState state;
    const bool processed{m_chainman.ProcessNewBlockHeaders(headers,
                                                           /*min_pow_checked=*/true,
                                                           state, &pindexLast)};
    if (!processed) {
        if (state.IsInvalid()) {
            MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
            return;
        }
    }
    // NOTE(review): if `processed` is false but the state is not invalid, we
    // fall through to this assert with pindexLast possibly null - presumably
    // that combination cannot occur; confirm.
    assert(pindexLast);

    if (processed && received_new_header) {
        // NOTE(review): via_compact_block is hardcoded false here even though
        // this function receives a via_compact_block argument - confirm intended.
        LogBlockHeader(*pindexLast, pfrom, /*via_compact_block=*/false);
    }

    // Consider fetching more headers if we are not using our headers-sync mechanism.
    if (nCount == m_opts.max_headers_result && !have_headers_sync) {
        // Headers message had its maximum size; the peer may have more headers.
        if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
            LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                    pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
        }
    }

    UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result);

    // Consider immediately downloading blocks.
    HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);

    return;
}
3006  
3007  std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state,
3008                                         bool first_time_failure)
3009  {
3010      AssertLockNotHeld(m_peer_mutex);
3011      AssertLockHeld(g_msgproc_mutex);
3012      AssertLockHeld(m_tx_download_mutex);
3013  
3014      PeerRef peer{GetPeerRef(nodeid)};
3015  
3016      LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
3017          ptx->GetHash().ToString(),
3018          ptx->GetWitnessHash().ToString(),
3019          nodeid,
3020          state.ToString());
3021  
3022      const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure);
3023  
3024      if (add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
3025          AddToCompactExtraTransactions(ptx);
3026      }
3027      for (const Txid& parent_txid : unique_parents) {
3028          if (peer) AddKnownTx(*peer, parent_txid);
3029      }
3030  
3031      MaybePunishNodeForTx(nodeid, state);
3032  
3033      return package_to_validate;
3034  }
3035  
3036  void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
3037  {
3038      AssertLockNotHeld(m_peer_mutex);
3039      AssertLockHeld(g_msgproc_mutex);
3040      AssertLockHeld(m_tx_download_mutex);
3041  
3042      m_txdownloadman.MempoolAcceptedTx(tx);
3043  
3044      LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
3045               nodeid,
3046               tx->GetHash().ToString(),
3047               tx->GetWitnessHash().ToString(),
3048               m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
3049  
3050      RelayTransaction(tx->GetHash(), tx->GetWitnessHash());
3051  
3052      for (const CTransactionRef& removedTx : replaced_transactions) {
3053          AddToCompactExtraTransactions(removedTx);
3054      }
3055  }
3056  
3057  void PeerManagerImpl::ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result)
3058  {
3059      AssertLockNotHeld(m_peer_mutex);
3060      AssertLockHeld(g_msgproc_mutex);
3061      AssertLockHeld(m_tx_download_mutex);
3062  
3063      const auto& package = package_to_validate.m_txns;
3064      const auto& senders = package_to_validate.m_senders;
3065  
3066      if (package_result.m_state.IsInvalid()) {
3067          m_txdownloadman.MempoolRejectedPackage(package);
3068      }
3069      // We currently only expect to process 1-parent-1-child packages. Remove if this changes.
3070      if (!Assume(package.size() == 2)) return;
3071  
3072      // Iterate backwards to erase in-package descendants from the orphanage before they become
3073      // relevant in AddChildrenToWorkSet.
3074      auto package_iter = package.rbegin();
3075      auto senders_iter = senders.rbegin();
3076      while (package_iter != package.rend()) {
3077          const auto& tx = *package_iter;
3078          const NodeId nodeid = *senders_iter;
3079          const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())};
3080  
3081          // It is not guaranteed that a result exists for every transaction.
3082          if (it_result != package_result.m_tx_results.end()) {
3083              const auto& tx_result = it_result->second;
3084              switch (tx_result.m_result_type) {
3085                  case MempoolAcceptResult::ResultType::VALID:
3086                  {
3087                      ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions);
3088                      break;
3089                  }
3090                  case MempoolAcceptResult::ResultType::INVALID:
3091                  case MempoolAcceptResult::ResultType::DIFFERENT_WITNESS:
3092                  {
3093                      // Don't add to vExtraTxnForCompact, as these transactions should have already been
3094                      // added there when added to the orphanage or rejected for TX_RECONSIDERABLE.
3095                      // This should be updated if package submission is ever used for transactions
3096                      // that haven't already been validated before.
3097                      ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*first_time_failure=*/false);
3098                      break;
3099                  }
3100                  case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY:
3101                  {
3102                      // AlreadyHaveTx() should be catching transactions that are already in mempool.
3103                      Assume(false);
3104                      break;
3105                  }
3106              }
3107          }
3108          package_iter++;
3109          senders_iter++;
3110      }
3111  }
3112  
3113  // NOTE: the orphan processing used to be uninterruptible and quadratic, which could allow a peer to stall the node for
3114  // hours with specially crafted transactions. See https://bitcoincore.org/en/2024/07/03/disclose-orphan-dos.
3115  bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
3116  {
3117      AssertLockHeld(g_msgproc_mutex);
3118      LOCK2(::cs_main, m_tx_download_mutex);
3119  
3120      CTransactionRef porphanTx = nullptr;
3121  
3122      while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) {
3123          const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
3124          const TxValidationState& state = result.m_state;
3125          const Txid& orphanHash = porphanTx->GetHash();
3126          const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash();
3127  
3128          if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
3129              LogDebug(BCLog::TXPACKAGES, "   accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
3130              ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions);
3131              return true;
3132          } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
3133              LogDebug(BCLog::TXPACKAGES, "   invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
3134                  orphanHash.ToString(),
3135                  orphan_wtxid.ToString(),
3136                  peer.m_id,
3137                  state.ToString());
3138  
3139              if (Assume(state.IsInvalid() &&
3140                         state.GetResult() != TxValidationResult::TX_UNKNOWN &&
3141                         state.GetResult() != TxValidationResult::TX_NO_MEMPOOL &&
3142                         state.GetResult() != TxValidationResult::TX_RESULT_UNSET)) {
3143                  ProcessInvalidTx(peer.m_id, porphanTx, state, /*first_time_failure=*/false);
3144              }
3145              return true;
3146          }
3147      }
3148  
3149      return false;
3150  }
3151  
3152  bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
3153                                                  BlockFilterType filter_type, uint32_t start_height,
3154                                                  const uint256& stop_hash, uint32_t max_height_diff,
3155                                                  const CBlockIndex*& stop_index,
3156                                                  BlockFilterIndex*& filter_index)
3157  {
3158      const bool supported_filter_type =
3159          (filter_type == BlockFilterType::BASIC &&
3160           (peer.m_our_services & NODE_COMPACT_FILTERS));
3161      if (!supported_filter_type) {
3162          LogDebug(BCLog::NET, "peer requested unsupported block filter type: %d, %s\n",
3163                   static_cast<uint8_t>(filter_type), node.DisconnectMsg(fLogIPs));
3164          node.fDisconnect = true;
3165          return false;
3166      }
3167  
3168      {
3169          LOCK(cs_main);
3170          stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
3171  
3172          // Check that the stop block exists and the peer would be allowed to fetch it.
3173          if (!stop_index || !BlockRequestAllowed(stop_index)) {
3174              LogDebug(BCLog::NET, "peer requested invalid block hash: %s, %s\n",
3175                       stop_hash.ToString(), node.DisconnectMsg(fLogIPs));
3176              node.fDisconnect = true;
3177              return false;
3178          }
3179      }
3180  
3181      uint32_t stop_height = stop_index->nHeight;
3182      if (start_height > stop_height) {
3183          LogDebug(BCLog::NET, "peer sent invalid getcfilters/getcfheaders with "
3184                   "start height %d and stop height %d, %s\n",
3185                   start_height, stop_height, node.DisconnectMsg(fLogIPs));
3186          node.fDisconnect = true;
3187          return false;
3188      }
3189      if (stop_height - start_height >= max_height_diff) {
3190          LogDebug(BCLog::NET, "peer requested too many cfilters/cfheaders: %d / %d, %s\n",
3191                   stop_height - start_height + 1, max_height_diff, node.DisconnectMsg(fLogIPs));
3192          node.fDisconnect = true;
3193          return false;
3194      }
3195  
3196      filter_index = GetBlockFilterIndex(filter_type);
3197      if (!filter_index) {
3198          LogDebug(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
3199          return false;
3200      }
3201  
3202      return true;
3203  }
3204  
3205  void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv)
3206  {
3207      uint8_t filter_type_ser;
3208      uint32_t start_height;
3209      uint256 stop_hash;
3210  
3211      vRecv >> filter_type_ser >> start_height >> stop_hash;
3212  
3213      const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3214  
3215      const CBlockIndex* stop_index;
3216      BlockFilterIndex* filter_index;
3217      if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3218                                     MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
3219          return;
3220      }
3221  
3222      std::vector<BlockFilter> filters;
3223      if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
3224          LogDebug(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3225                       BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3226          return;
3227      }
3228  
3229      for (const auto& filter : filters) {
3230          MakeAndPushMessage(node, NetMsgType::CFILTER, filter);
3231      }
3232  }
3233  
3234  void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv)
3235  {
3236      uint8_t filter_type_ser;
3237      uint32_t start_height;
3238      uint256 stop_hash;
3239  
3240      vRecv >> filter_type_ser >> start_height >> stop_hash;
3241  
3242      const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3243  
3244      const CBlockIndex* stop_index;
3245      BlockFilterIndex* filter_index;
3246      if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3247                                     MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
3248          return;
3249      }
3250  
3251      uint256 prev_header;
3252      if (start_height > 0) {
3253          const CBlockIndex* const prev_block =
3254              stop_index->GetAncestor(static_cast<int>(start_height - 1));
3255          if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
3256              LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3257                           BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
3258              return;
3259          }
3260      }
3261  
3262      std::vector<uint256> filter_hashes;
3263      if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
3264          LogDebug(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3265                       BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3266          return;
3267      }
3268  
3269      MakeAndPushMessage(node, NetMsgType::CFHEADERS,
3270                filter_type_ser,
3271                stop_index->GetBlockHash(),
3272                prev_header,
3273                filter_hashes);
3274  }
3275  
3276  void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv)
3277  {
3278      uint8_t filter_type_ser;
3279      uint256 stop_hash;
3280  
3281      vRecv >> filter_type_ser >> stop_hash;
3282  
3283      const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3284  
3285      const CBlockIndex* stop_index;
3286      BlockFilterIndex* filter_index;
3287      if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash,
3288                                     /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
3289                                     stop_index, filter_index)) {
3290          return;
3291      }
3292  
3293      std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
3294  
3295      // Populate headers.
3296      const CBlockIndex* block_index = stop_index;
3297      for (int i = headers.size() - 1; i >= 0; i--) {
3298          int height = (i + 1) * CFCHECKPT_INTERVAL;
3299          block_index = block_index->GetAncestor(height);
3300  
3301          if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
3302              LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3303                           BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
3304              return;
3305          }
3306      }
3307  
3308      MakeAndPushMessage(node, NetMsgType::CFCHECKPT,
3309                filter_type_ser,
3310                stop_index->GetBlockHash(),
3311                headers);
3312  }
3313  
3314  void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked)
3315  {
3316      bool new_block{false};
3317      m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
3318      if (new_block) {
3319          node.m_last_block_time = GetTime<std::chrono::seconds>();
3320          // In case this block came from a different peer than we requested
3321          // from, we can erase the block request now anyway (as we just stored
3322          // this block to disk).
3323          LOCK(cs_main);
3324          RemoveBlockRequest(block->GetHash(), std::nullopt);
3325      } else {
3326          LOCK(cs_main);
3327          mapBlockSource.erase(block->GetHash());
3328      }
3329  }
3330  
// Handles the transactions a peer supplied to complete a compact block
// reconstruction (BIP 152 getblocktxn/blocktxn flow): fills in the partially
// downloaded block and, if reconstruction succeeds, hands the full block to
// ProcessBlock.
void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
{
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    bool fBlockRead{false};
    {
        LOCK(cs_main);

        // All outstanding in-flight requests for this block hash, across peers.
        auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash);
        size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
        bool requested_block_from_this_peer{false};

        // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
        bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());

        // Find this peer's request; it only counts if a compact block
        // reconstruction (partialBlock) is actually in progress for it.
        while (range_flight.first != range_flight.second) {
            auto [node_id, block_it] = range_flight.first->second;
            if (node_id == pfrom.GetId() && block_it->partialBlock) {
                requested_block_from_this_peer = true;
                break;
            }
            range_flight.first++;
        }

        if (!requested_block_from_this_peer) {
            LogDebug(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
            return;
        }

        // Try to complete the block with the transactions the peer provided.
        PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
        ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);
        if (status == READ_STATUS_INVALID) {
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
            Misbehaving(peer, "invalid compact block/non-matching block transactions");
            return;
        } else if (status == READ_STATUS_FAILED) {
            if (first_in_flight) {
                // Might have collided, fall back to getdata now :(
                std::vector<CInv> invs;
                invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash);
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs);
            } else {
                // Another peer's download attempt is ahead of ours; give up on
                // this peer's copy and wait for the first download to finish.
                RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
                LogDebug(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
                return;
            }
        } else {
            // Block is either okay, or possibly we received
            // READ_STATUS_CHECKBLOCK_FAILED.
            // Note that CheckBlock can only fail for one of a few reasons:
            // 1. bad-proof-of-work (impossible here, because we've already
            //    accepted the header)
            // 2. merkleroot doesn't match the transactions given (already
            //    caught in FillBlock with READ_STATUS_FAILED, so
            //    impossible here)
            // 3. the block is otherwise invalid (eg invalid coinbase,
            //    block is too big, too many legacy sigops, etc).
            // So if CheckBlock failed, #3 is the only possibility.
            // Under BIP 152, we don't discourage the peer unless proof of work is
            // invalid (we don't require all the stateless checks to have
            // been run).  This is handled below, so just treat this as
            // though the block was successfully read, and rely on the
            // handling in ProcessNewBlock to ensure the block index is
            // updated, etc.
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
            fBlockRead = true;
            // mapBlockSource is used for potentially punishing peers and
            // updating which peers send us compact blocks, so the race
            // between here and cs_main in ProcessNewBlock is fine.
            // BIP 152 permits peers to relay compact blocks after validating
            // the header only; we should not punish peers if the block turns
            // out to be invalid.
            mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false));
        }
    } // Don't hold cs_main when we call into ProcessNewBlock
    if (fBlockRead) {
        // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
        // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
        // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
        // disk-space attacks), but this should be safe due to the
        // protections in the compact block handler -- see related comment
        // in compact block optimistic reconstruction handling.
        ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
    }
    return;
}
3416  
3417  void PeerManagerImpl::LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block) {
3418      // To prevent log spam, this function should only be called after it was determined that a
3419      // header is both new and valid.
3420      //
3421      // These messages are valuable for detecting potential selfish mining behavior;
3422      // if multiple displacing headers are seen near simultaneously across many
3423      // nodes in the network, this might be an indication of selfish mining.
3424      // In addition it can be used to identify peers which send us a header, but
3425      // don't followup with a complete and valid (compact) block.
3426      // Having this log by default when not in IBD ensures broad availability of
3427      // this data in case investigation is merited.
3428      const auto msg = strprintf(
3429          "Saw new %sheader hash=%s height=%d peer=%d%s",
3430          via_compact_block ? "cmpctblock " : "",
3431          index.GetBlockHash().ToString(),
3432          index.nHeight,
3433          peer.GetId(),
3434          peer.LogIP(fLogIPs)
3435      );
3436      if (m_chainman.IsInitialBlockDownload()) {
3437          LogDebug(BCLog::VALIDATION, "%s", msg);
3438      } else {
3439          LogInfo("%s", msg);
3440      }
3441  }
3442  
3443  void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
3444                                       const std::chrono::microseconds time_received,
3445                                       const std::atomic<bool>& interruptMsgProc)
3446  {
3447      AssertLockHeld(g_msgproc_mutex);
3448  
3449      LogDebug(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
3450  
3451      PeerRef peer = GetPeerRef(pfrom.GetId());
3452      if (peer == nullptr) return;
3453  
3454      if (msg_type == NetMsgType::VERSION) {
3455          if (pfrom.nVersion != 0) {
3456              LogDebug(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
3457              return;
3458          }
3459  
3460          int64_t nTime;
3461          CService addrMe;
3462          uint64_t nNonce = 1;
3463          ServiceFlags nServices;
3464          int nVersion;
3465          std::string cleanSubVer;
3466          int starting_height = -1;
3467          bool fRelay = true;
3468  
3469          vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
3470          if (nTime < 0) {
3471              nTime = 0;
3472          }
3473          vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer
3474          vRecv >> CNetAddr::V1(addrMe);
3475          if (!pfrom.IsInboundConn())
3476          {
3477              // Overwrites potentially existing services. In contrast to this,
3478              // unvalidated services received via gossip relay in ADDR/ADDRV2
3479              // messages are only ever added but cannot replace existing ones.
3480              m_addrman.SetServices(pfrom.addr, nServices);
3481          }
3482          if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
3483          {
3484              LogDebug(BCLog::NET, "peer does not offer the expected services (%08x offered, %08x expected), %s\n",
3485                       nServices,
3486                       GetDesirableServiceFlags(nServices),
3487                       pfrom.DisconnectMsg(fLogIPs));
3488              pfrom.fDisconnect = true;
3489              return;
3490          }
3491  
3492          if (nVersion < MIN_PEER_PROTO_VERSION) {
3493              // disconnect from peers older than this proto version
3494              LogDebug(BCLog::NET, "peer using obsolete version %i, %s\n", nVersion, pfrom.DisconnectMsg(fLogIPs));
3495              pfrom.fDisconnect = true;
3496              return;
3497          }
3498  
3499          if (!vRecv.empty()) {
3500              // The version message includes information about the sending node which we don't use:
3501              //   - 8 bytes (service bits)
3502              //   - 16 bytes (ipv6 address)
3503              //   - 2 bytes (port)
3504              vRecv.ignore(26);
3505              vRecv >> nNonce;
3506          }
3507          if (!vRecv.empty()) {
3508              std::string strSubVer;
3509              vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
3510              cleanSubVer = SanitizeString(strSubVer);
3511          }
3512          if (!vRecv.empty()) {
3513              vRecv >> starting_height;
3514          }
3515          if (!vRecv.empty())
3516              vRecv >> fRelay;
3517          // Disconnect if we connected to ourself
3518          if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
3519          {
3520              LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());
3521              pfrom.fDisconnect = true;
3522              return;
3523          }
3524  
3525          if (pfrom.IsInboundConn() && addrMe.IsRoutable())
3526          {
3527              SeenLocal(addrMe);
3528          }
3529  
3530          // Inbound peers send us their version message when they connect.
3531          // We send our version message in response.
3532          if (pfrom.IsInboundConn()) {
3533              PushNodeVersion(pfrom, *peer);
3534          }
3535  
3536          // Change version
3537          const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
3538          pfrom.SetCommonVersion(greatest_common_version);
3539          pfrom.nVersion = nVersion;
3540  
3541          if (greatest_common_version >= WTXID_RELAY_VERSION) {
3542              MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY);
3543          }
3544  
3545          // Signal ADDRv2 support (BIP155).
3546          if (greatest_common_version >= 70016) {
3547              // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
3548              // implementations reject messages they don't know. As a courtesy, don't send
3549              // it to nodes with a version before 70016, as no software is known to support
3550              // BIP155 that doesn't announce at least that protocol version number.
3551              MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2);
3552          }
3553  
3554          pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices);
3555          peer->m_their_services = nServices;
3556          pfrom.SetAddrLocal(addrMe);
3557          {
3558              LOCK(pfrom.m_subver_mutex);
3559              pfrom.cleanSubVer = cleanSubVer;
3560          }
3561          peer->m_starting_height = starting_height;
3562  
3563          // Only initialize the Peer::TxRelay m_relay_txs data structure if:
3564          // - this isn't an outbound block-relay-only connection, and
3565          // - this isn't an outbound feeler connection, and
3566          // - fRelay=true (the peer wishes to receive transaction announcements)
3567          //   or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that
3568          //   the peer may turn on transaction relay later.
3569          if (!pfrom.IsBlockOnlyConn() &&
3570              !pfrom.IsFeelerConn() &&
3571              (fRelay || (peer->m_our_services & NODE_BLOOM))) {
3572              auto* const tx_relay = peer->SetTxRelay();
3573              {
3574                  LOCK(tx_relay->m_bloom_filter_mutex);
3575                  tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
3576              }
3577              if (fRelay) pfrom.m_relays_txs = true;
3578          }
3579  
3580          if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) {
3581              // Per BIP-330, we announce txreconciliation support if:
3582              // - protocol version per the peer's VERSION message supports WTXID_RELAY;
3583              // - transaction relay is supported per the peer's VERSION message
3584              // - this is not a block-relay-only connection and not a feeler
3585              // - this is not an addr fetch connection;
3586              // - we are not in -blocksonly mode.
3587              const auto* tx_relay = peer->GetTxRelay();
3588              if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) &&
3589                  !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) {
3590                  const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
3591                  MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL,
3592                                     TXRECONCILIATION_VERSION, recon_salt);
3593              }
3594          }
3595  
3596          MakeAndPushMessage(pfrom, NetMsgType::VERACK);
3597  
3598          // Potentially mark this peer as a preferred download peer.
3599          {
3600              LOCK(cs_main);
3601              CNodeState* state = State(pfrom.GetId());
3602              state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
3603              m_num_preferred_download_peers += state->fPreferredDownload;
3604          }
3605  
3606          // Attempt to initialize address relay for outbound peers and use result
3607          // to decide whether to send GETADDR, so that we don't send it to
3608          // inbound or outbound block-relay-only peers.
3609          bool send_getaddr{false};
3610          if (!pfrom.IsInboundConn()) {
3611              send_getaddr = SetupAddressRelay(pfrom, *peer);
3612          }
3613          if (send_getaddr) {
3614              // Do a one-time address fetch to help populate/update our addrman.
3615              // If we're starting up for the first time, our addrman may be pretty
3616              // empty, so this mechanism is important to help us connect to the network.
3617              // We skip this for block-relay-only peers. We want to avoid
3618              // potentially leaking addr information and we do not want to
3619              // indicate to the peer that we will participate in addr relay.
3620              MakeAndPushMessage(pfrom, NetMsgType::GETADDR);
3621              peer->m_getaddr_sent = true;
3622              // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
3623              // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
3624              peer->m_addr_token_bucket += MAX_ADDR_TO_SEND;
3625          }
3626  
3627          if (!pfrom.IsInboundConn()) {
3628              // For non-inbound connections, we update the addrman to record
3629              // connection success so that addrman will have an up-to-date
3630              // notion of which peers are online and available.
3631              //
3632              // While we strive to not leak information about block-relay-only
3633              // connections via the addrman, not moving an address to the tried
3634              // table is also potentially detrimental because new-table entries
3635              // are subject to eviction in the event of addrman collisions.  We
3636              // mitigate the information-leak by never calling
3637              // AddrMan::Connected() on block-relay-only peers; see
3638              // FinalizeNode().
3639              //
3640              // This moves an address from New to Tried table in Addrman,
3641              // resolves tried-table collisions, etc.
3642              m_addrman.Good(pfrom.addr);
3643          }
3644  
3645          const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3646          LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
3647                    cleanSubVer, pfrom.nVersion,
3648                    peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
3649                    pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3650  
3651          peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>();
3652          if (!pfrom.IsInboundConn()) {
3653              // Don't use timedata samples from inbound peers to make it
3654              // harder for others to create false warnings about our clock being out of sync.
3655              m_outbound_time_offsets.Add(peer->m_time_offset);
3656              m_outbound_time_offsets.WarnIfOutOfSync();
3657          }
3658  
3659          // If the peer is old enough to have the old alert system, send it the final alert.
3660          if (greatest_common_version <= 70012) {
3661              constexpr auto finalAlert{"60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"_hex};
3662              MakeAndPushMessage(pfrom, "alert", finalAlert);
3663          }
3664  
3665          // Feeler connections exist only to verify if address is online.
3666          if (pfrom.IsFeelerConn()) {
3667              LogDebug(BCLog::NET, "feeler connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs));
3668              pfrom.fDisconnect = true;
3669          }
3670          return;
3671      }
3672  
    if (pfrom.nVersion == 0) {
        // Must have a version message before anything else; drop any other
        // message silently (log only) until the peer has sent VERSION.
        LogDebug(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
        return;
    }
3678  
    // VERACK: the peer acknowledges our VERSION, completing the handshake.
    // Finalizes feature negotiation (cmpctblock support, txreconciliation) and
    // marks the connection as fully established.
    if (msg_type == NetMsgType::VERACK) {
        if (pfrom.fSuccessfullyConnected) {
            // A second verack is harmless but redundant; ignore it.
            LogDebug(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId());
            return;
        }

        // Log successful connections unconditionally for outbound, but not for inbound as those
        // can be triggered by an attacker at high rate.
        if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) {
            const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
            LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n",
                      pfrom.ConnectionTypeAsString(),
                      TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type),
                      pfrom.nVersion.load(), peer->m_starting_height,
                      pfrom.GetId(), pfrom.LogIP(fLogIPs),
                      (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
        }

        if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
            // Tell our peer we are willing to provide version 2 cmpctblocks.
            // However, we do not request new block announcements using
            // cmpctblock messages.
            // We send this to non-NODE NETWORK peers as well, because
            // they may wish to request compact blocks from us
            MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
        }

        if (m_txreconciliation) {
            if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) {
                // We could have optimistically pre-registered/registered the peer. In that case,
                // we should forget about the reconciliation state here if this wasn't followed
                // by WTXIDRELAY (since WTXIDRELAY can't be announced later).
                m_txreconciliation->ForgetPeer(pfrom.GetId());
            }
        }

        if (auto tx_relay = peer->GetTxRelay()) {
            // `TxRelay::m_tx_inventory_to_send` must be empty before the
            // version handshake is completed as
            // `TxRelay::m_next_inv_send_time` is first initialised in
            // `SendMessages` after the verack is received. Any transactions
            // received during the version handshake would otherwise
            // immediately be advertised without random delay, potentially
            // leaking the time of arrival to a spy.
            Assume(WITH_LOCK(
                tx_relay->m_tx_inventory_mutex,
                return tx_relay->m_tx_inventory_to_send.empty() &&
                       tx_relay->m_next_inv_send_time == 0s));
        }

        {
            // Register the peer with the tx download manager now that its
            // relay preferences (preferred-download, wtxid relay) are final.
            LOCK2(::cs_main, m_tx_download_mutex);
            const CNodeState* state = State(pfrom.GetId());
            m_txdownloadman.ConnectedPeer(pfrom.GetId(), node::TxDownloadConnectionInfo {
                .m_preferred = state->fPreferredDownload,
                .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay),
                .m_wtxid_relay = peer->m_wtxid_relay,
            });
        }

        // From this point on, post-handshake messages are accepted.
        pfrom.fSuccessfullyConnected = true;
        return;
    }
3742  
    // SENDHEADERS (BIP130): the peer prefers receiving new-block announcements
    // via headers messages rather than inv; just record the preference.
    if (msg_type == NetMsgType::SENDHEADERS) {
        peer->m_prefers_headers = true;
        return;
    }
3747  
    // SENDCMPCT (BIP152): the peer announces compact-block support and whether
    // it selects us as a high-bandwidth compact-block peer.
    if (msg_type == NetMsgType::SENDCMPCT) {
        bool sendcmpct_hb{false};
        uint64_t sendcmpct_version{0};
        vRecv >> sendcmpct_hb >> sendcmpct_version;

        // Only support compact block relay with witnesses
        if (sendcmpct_version != CMPCTBLOCKS_VERSION) return;

        LOCK(cs_main);
        CNodeState* nodestate = State(pfrom.GetId());
        nodestate->m_provides_cmpctblocks = true;
        nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
        // save whether peer selects us as BIP152 high-bandwidth peer
        // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
        pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
        return;
    }
3765  
    // BIP339 defines feature negotiation of wtxidrelay, which must happen between
    // VERSION and VERACK to avoid relay problems from switching after a connection is up.
    if (msg_type == NetMsgType::WTXIDRELAY) {
        if (pfrom.fSuccessfullyConnected) {
            // Disconnect peers that send a wtxidrelay message after VERACK.
            LogDebug(BCLog::NET, "wtxidrelay received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }
        if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
            if (!peer->m_wtxid_relay) {
                // First wtxidrelay from this peer: switch it to announcing
                // transactions by wtxid and track the global count.
                peer->m_wtxid_relay = true;
                m_wtxid_relay_peers++;
            } else {
                // Duplicate negotiation message; tolerated, no state change.
                LogDebug(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
            }
        } else {
            // Peer's protocol version predates wtxid relay; ignore the request.
            LogDebug(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId());
        }
        return;
    }
3787  
    // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen
    // between VERSION and VERACK.
    if (msg_type == NetMsgType::SENDADDRV2) {
        if (pfrom.fSuccessfullyConnected) {
            // Disconnect peers that send a SENDADDRV2 message after VERACK.
            LogDebug(BCLog::NET, "sendaddrv2 received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }
        // Record that future address messages to this peer should use ADDRV2.
        peer->m_wants_addrv2 = true;
        return;
    }
3800  
    // Received from a peer demonstrating readiness to announce transactions via reconciliations.
    // This feature negotiation must happen between VERSION and VERACK to avoid relay problems
    // from switching announcement protocols after the connection is up.
    if (msg_type == NetMsgType::SENDTXRCNCL) {
        if (!m_txreconciliation) {
            // Reconciliation is disabled locally; negotiation is simply ignored.
            LogDebug(BCLog::NET, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
            return;
        }

        if (pfrom.fSuccessfullyConnected) {
            // Like other pre-verack negotiation messages, sending this after
            // VERACK is a protocol violation: disconnect.
            LogDebug(BCLog::NET, "sendtxrcncl received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }

        // Peer must not offer us reconciliations if we specified no tx relay support in VERSION.
        if (RejectIncomingTxs(pfrom)) {
            LogDebug(BCLog::NET, "sendtxrcncl received to which we indicated no tx relay, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }

        // Peer must not offer us reconciliations if they specified no tx relay support in VERSION.
        // This flag might also be false in other cases, but the RejectIncomingTxs check above
        // eliminates them, so that this flag fully represents what we are looking for.
        const auto* tx_relay = peer->GetTxRelay();
        if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
            LogDebug(BCLog::NET, "sendtxrcncl received which indicated no tx relay to us, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }

        uint32_t peer_txreconcl_version;
        uint64_t remote_salt;
        vRecv >> peer_txreconcl_version >> remote_salt;

        // Register the peer's reconciliation parameters; disconnect on
        // protocol violations, tolerate an unexpected-but-harmless signal.
        const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(),
                                                                                     peer_txreconcl_version, remote_salt);
        switch (result) {
        case ReconciliationRegisterResult::NOT_FOUND:
            LogDebug(BCLog::NET, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
            break;
        case ReconciliationRegisterResult::SUCCESS:
            break;
        case ReconciliationRegisterResult::ALREADY_REGISTERED:
            LogDebug(BCLog::NET, "txreconciliation protocol violation (sendtxrcncl received from already registered peer), %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        case ReconciliationRegisterResult::PROTOCOL_VIOLATION:
            LogDebug(BCLog::NET, "txreconciliation protocol violation, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }
        return;
    }
3856  
    // All remaining message types require a completed handshake; anything
    // else arriving before VERACK is logged and dropped (no disconnect).
    if (!pfrom.fSuccessfullyConnected) {
        LogDebug(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
        return;
    }
3861  
    // ADDR / ADDRV2: the peer shares network addresses with us. Addresses are
    // rate-limited (token bucket), filtered, optionally relayed onward, and the
    // survivors are fed into addrman.
    if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
        const auto ser_params{
            msg_type == NetMsgType::ADDRV2 ?
            // Set V2 param so that the CNetAddr and CAddress
            // unserialize methods know that an address in v2 format is coming.
            CAddress::V2_NETWORK :
            CAddress::V1_NETWORK,
        };

        std::vector<CAddress> vAddr;

        vRecv >> ser_params(vAddr);

        if (!SetupAddressRelay(pfrom, *peer)) {
            // Address relay is not enabled for this connection type
            // (e.g. block-relay-only); ignore the message entirely.
            LogDebug(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
            return;
        }

        if (vAddr.size() > MAX_ADDR_TO_SEND)
        {
            // Oversized addr messages are a misbehavior; penalize the peer.
            Misbehaving(*peer, strprintf("%s message size = %u", msg_type, vAddr.size()));
            return;
        }

        // Store the new addresses
        std::vector<CAddress> vAddrOk;
        const auto current_a_time{Now<NodeSeconds>()};

        // Update/increment addr rate limiting bucket.
        const auto current_time{GetTime<std::chrono::microseconds>()};
        if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
            // Don't increment bucket if it's already full
            const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
            const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
            peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
        }
        peer->m_addr_token_timestamp = current_time;

        // Peers with Addr permission bypass the rate limit entirely.
        const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
        uint64_t num_proc = 0;
        uint64_t num_rate_limit = 0;
        // Shuffle so that rate limiting doesn't systematically favor addresses
        // that happen to appear early in the message.
        std::shuffle(vAddr.begin(), vAddr.end(), m_rng);
        for (CAddress& addr : vAddr)
        {
            if (interruptMsgProc)
                return;

            // Apply rate limiting.
            if (peer->m_addr_token_bucket < 1.0) {
                if (rate_limited) {
                    ++num_rate_limit;
                    continue;
                }
            } else {
                peer->m_addr_token_bucket -= 1.0;
            }
            // We only bother storing full nodes, though this may include
            // things which we would not make an outbound connection to, in
            // part because we may make feeler connections to them.
            if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
                continue;

            // Clamp implausible timestamps (too old or >10min in the future)
            // to roughly five days ago.
            if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) {
                addr.nTime = current_a_time - 5 * 24h;
            }
            AddAddressKnown(*peer, addr);
            if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
                // Do not process banned/discouraged addresses beyond remembering we received them
                continue;
            }
            ++num_proc;
            const bool reachable{g_reachable_nets.Contains(addr)};
            if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
                // Relay to a limited number of other nodes
                RelayAddress(pfrom.GetId(), addr, reachable);
            }
            // Do not store addresses outside our network
            if (reachable) {
                vAddrOk.push_back(addr);
            }
        }
        peer->m_addr_processed += num_proc;
        peer->m_addr_rate_limited += num_rate_limit;
        LogDebug(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
                 vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());

        m_addrman.Add(vAddrOk, pfrom.addr, 2h);
        // A small (<1000) response is treated as the complete reply to our
        // GETADDR, so clear the outstanding-request flag.
        if (vAddr.size() < 1000) peer->m_getaddr_sent = false;

        // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
        if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
            LogDebug(BCLog::NET, "addrfetch connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
        }
        return;
    }
3958  
    // INV: the peer announces inventory (blocks and/or transactions). Block
    // announcements may trigger a getheaders; transaction announcements are
    // handed to the tx download manager.
    if (msg_type == NetMsgType::INV) {
        std::vector<CInv> vInv;
        vRecv >> vInv;
        if (vInv.size() > MAX_INV_SZ)
        {
            Misbehaving(*peer, strprintf("inv message size = %u", vInv.size()));
            return;
        }

        const bool reject_tx_invs{RejectIncomingTxs(pfrom)};

        LOCK2(cs_main, m_tx_download_mutex);

        const auto current_time{GetTime<std::chrono::microseconds>()};
        // Track the last announced block that we don't have and haven't
        // requested; used below to decide whether to send getheaders.
        uint256* best_block{nullptr};

        for (CInv& inv : vInv) {
            if (interruptMsgProc) return;

            // Ignore INVs that don't match wtxidrelay setting.
            // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
            // This is fine as no INV messages are involved in that process.
            if (peer->m_wtxid_relay) {
                if (inv.IsMsgTx()) continue;
            } else {
                if (inv.IsMsgWtx()) continue;
            }

            if (inv.IsMsgBlk()) {
                const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
                LogDebug(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());

                UpdateBlockAvailability(pfrom.GetId(), inv.hash);
                if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
                    // Headers-first is the primary method of announcement on
                    // the network. If a node fell back to sending blocks by
                    // inv, it may be for a re-org, or because we haven't
                    // completed initial headers sync. The final block hash
                    // provided should be the highest, so send a getheaders and
                    // then fetch the blocks we need to catch up.
                    best_block = &inv.hash;
                }
            } else if (inv.IsGenTxMsg()) {
                if (reject_tx_invs) {
                    // We told this peer not to relay transactions to us
                    // (e.g. block-relay-only); a tx inv is a violation.
                    LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, %s\n", inv.hash.ToString(), pfrom.DisconnectMsg(fLogIPs));
                    pfrom.fDisconnect = true;
                    return;
                }
                const GenTxid gtxid = ToGenTxid(inv);
                AddKnownTx(*peer, inv.hash);

                // During IBD we don't fetch announced transactions at all.
                if (!m_chainman.IsInitialBlockDownload()) {
                    const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time)};
                    LogDebug(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
                }
            } else {
                LogDebug(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
            }
        }

        if (best_block != nullptr) {
            // If we haven't started initial headers-sync with this peer, then
            // consider sending a getheaders now. On initial startup, there's a
            // reliability vs bandwidth tradeoff, where we are only trying to do
            // initial headers sync with one peer at a time, with a long
            // timeout (at which point, if the sync hasn't completed, we will
            // disconnect the peer and then choose another). In the meantime,
            // as new blocks are found, we are willing to add one new peer per
            // block to sync with as well, to sync quicker in the case where
            // our initial peer is unresponsive (but less bandwidth than we'd
            // use if we turned on sync with all peers).
            CNodeState& state{*Assert(State(pfrom.GetId()))};
            if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) {
                if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
                    LogDebug(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
                            m_chainman.m_best_header->nHeight, best_block->ToString(),
                            pfrom.GetId());
                }
                if (!state.fSyncStarted) {
                    peer->m_inv_triggered_getheaders_before_sync = true;
                    // Update the last block hash that triggered a new headers
                    // sync, so that we don't turn on headers sync with more
                    // than 1 new peer every new block.
                    m_last_block_inv_triggering_headers_sync = *best_block;
                }
            }
        }

        return;
    }
4049  
    // GETDATA: the peer requests specific inventory items (blocks, txs).
    // Requests are queued per-peer and serviced by ProcessGetData.
    if (msg_type == NetMsgType::GETDATA) {
        std::vector<CInv> vInv;
        vRecv >> vInv;
        if (vInv.size() > MAX_INV_SZ)
        {
            Misbehaving(*peer, strprintf("getdata message size = %u", vInv.size()));
            return;
        }

        LogDebug(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());

        if (vInv.size() > 0) {
            LogDebug(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
        }

        {
            // Append to the peer's request queue and try to serve immediately;
            // anything not served now is retried by the message-processing loop.
            LOCK(peer->m_getdata_requests_mutex);
            peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
            ProcessGetData(pfrom, *peer, interruptMsgProc);
        }

        return;
    }
4073  
    // GETBLOCKS (legacy block-by-inv sync): find the fork point from the
    // peer's locator and queue up to 500 block hashes for inv relay.
    if (msg_type == NetMsgType::GETBLOCKS) {
        CBlockLocator locator;
        uint256 hashStop;
        vRecv >> locator >> hashStop;

        if (locator.vHave.size() > MAX_LOCATOR_SZ) {
            // Oversized locators are a protocol violation; disconnect.
            LogDebug(BCLog::NET, "getblocks locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }

        // We might have announced the currently-being-connected tip using a
        // compact block, which resulted in the peer sending a getblocks
        // request, which we would otherwise respond to without the new block.
        // To avoid this situation we simply verify that we are on our best
        // known chain now. This is super overkill, but we handle it better
        // for getheaders requests, and there are no known nodes which support
        // compact blocks but still use getblocks to request blocks.
        {
            std::shared_ptr<const CBlock> a_recent_block;
            {
                LOCK(m_most_recent_block_mutex);
                a_recent_block = m_most_recent_block;
            }
            BlockValidationState state;
            if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
                LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
            }
        }

        LOCK(cs_main);

        // Find the last block the caller has in the main chain
        const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);

        // Send the rest of the chain
        if (pindex)
            pindex = m_chainman.ActiveChain().Next(pindex);
        int nLimit = 500;
        LogDebug(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
        for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
        {
            if (pindex->GetBlockHash() == hashStop)
            {
                LogDebug(BCLog::NET, "  getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                break;
            }
            // If pruning, don't inv blocks unless we have on disk and are likely to still have
            // for some reasonable time window (1 hour) that block relay might require.
            const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
            if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
                LogDebug(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                break;
            }
            WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
            if (--nLimit <= 0) {
                // When this block is requested, we'll send an inv that'll
                // trigger the peer to getblocks the next batch of inventory.
                LogDebug(BCLog::NET, "  getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();});
                break;
            }
        }
        return;
    }
4139  
    // GETBLOCKTXN (BIP152): the peer asks for specific transactions of a
    // recent block to complete compact-block reconstruction.
    if (msg_type == NetMsgType::GETBLOCKTXN) {
        BlockTransactionsRequest req;
        vRecv >> req;

        std::shared_ptr<const CBlock> recent_block;
        {
            LOCK(m_most_recent_block_mutex);
            if (m_most_recent_block_hash == req.blockhash)
                recent_block = m_most_recent_block;
            // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
        }
        if (recent_block) {
            // Fast path: the request is for our cached most-recent block,
            // so no disk read is needed.
            SendBlockTransactions(pfrom, *peer, *recent_block, req);
            return;
        }

        FlatFilePos block_pos{};
        {
            LOCK(cs_main);

            const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
            if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
                LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
                return;
            }

            // Only answer from disk for sufficiently recent blocks; older
            // requests fall through to the full-block response below.
            if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
                block_pos = pindex->GetBlockPos();
            }
        }

        if (!block_pos.IsNull()) {
            CBlock block;
            const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos)};
            // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get
            // pruned after we release cs_main above, so this read should never fail.
            assert(ret);

            SendBlockTransactions(pfrom, *peer, block, req);
            return;
        }

        // If an older block is requested (should never happen in practice,
        // but can happen in tests) send a block response instead of a
        // blocktxn response. Sending a full block response instead of a
        // small blocktxn response is preferable in the case where a peer
        // might maliciously send lots of getblocktxn requests to trigger
        // expensive disk reads, because it will require the peer to
        // actually receive all the data read from disk over the network.
        LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
        CInv inv{MSG_WITNESS_BLOCK, req.blockhash};
        WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
        // The message processing loop will go around again (without pausing) and we'll respond then
        return;
    }
4195  
    // GETHEADERS: the peer asks for headers following the fork point described
    // by a block locator, up to an optional hashStop (or until the per-message
    // headers limit is reached).
    if (msg_type == NetMsgType::GETHEADERS) {
        CBlockLocator locator;
        uint256 hashStop;
        vRecv >> locator >> hashStop;

        // An oversized locator is a protocol violation; disconnect the peer.
        if (locator.vHave.size() > MAX_LOCATOR_SZ) {
            LogDebug(BCLog::NET, "getheaders locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }

        // While importing/reindexing our block index is incomplete, so we
        // cannot serve a meaningful response; silently ignore the request.
        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
            return;
        }

        LOCK(cs_main);

        // Don't serve headers from our active chain until our chainwork is at least
        // the minimum chain work. This prevents us from starting a low-work headers
        // sync that will inevitably be aborted by our peer.
        if (m_chainman.ActiveTip() == nullptr ||
                (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) {
            LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
            // Just respond with an empty headers message, to tell the peer to
            // go away but not treat us as unresponsive.
            MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>());
            return;
        }

        CNodeState *nodestate = State(pfrom.GetId());
        const CBlockIndex* pindex = nullptr;
        if (locator.IsNull())
        {
            // If locator is null, return the hashStop block
            pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
            if (!pindex) {
                return;
            }

            // Refuse to serve blocks that would fingerprint our chain state
            // (e.g. old blocks not on the main chain).
            if (!BlockRequestAllowed(pindex)) {
                LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
                return;
            }
        }
        else
        {
            // Find the last block the caller has in the main chain
            pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
            if (pindex)
                pindex = m_chainman.ActiveChain().Next(pindex);
        }

        // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        std::vector<CBlock> vHeaders;
        int nLimit = m_opts.max_headers_result;
        LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
        // Walk forward along the active chain, collecting headers until we hit
        // the limit, hashStop, or the tip.
        for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
        {
            vHeaders.emplace_back(pindex->GetBlockHeader());
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
                break;
        }
        // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
        // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
        // headers message). In both cases it's safe to update
        // pindexBestHeaderSent to be our tip.
        //
        // It is important that we simply reset the BestHeaderSent value here,
        // and not max(BestHeaderSent, newHeaderSent). We might have announced
        // the currently-being-connected tip using a compact block, which
        // resulted in the peer sending a headers request, which we respond to
        // without the new block. By resetting the BestHeaderSent, we ensure we
        // will re-announce the new block via headers (or compact blocks again)
        // in the SendMessages logic.
        nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
        MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
        return;
    }
4275  
    // TX: the peer relays a transaction. Decide whether to validate it (and
    // possibly an associated package), and punish/relay as appropriate.
    if (msg_type == NetMsgType::TX) {
        // E.g. blocks-only peers should not be sending us transactions.
        if (RejectIncomingTxs(pfrom)) {
            LogDebug(BCLog::NET, "transaction sent in violation of protocol, %s", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }

        // Stop processing the transaction early if we are still in IBD since we don't
        // have enough information to validate it yet. Sending unsolicited transactions
        // is not considered a protocol violation, so don't punish the peer.
        if (m_chainman.IsInitialBlockDownload()) return;

        CTransactionRef ptx;
        vRecv >> TX_WITH_WITNESS(ptx);
        const CTransaction& tx = *ptx;

        const uint256& txid = ptx->GetHash();
        const uint256& wtxid = ptx->GetWitnessHash();

        // Track the announcement under whichever id this peer relays by
        // (wtxid if BIP 339 wtxidrelay was negotiated, txid otherwise).
        const uint256& hash = peer->m_wtxid_relay ? wtxid : txid;
        AddKnownTx(*peer, hash);

        LOCK2(cs_main, m_tx_download_mutex);

        // Ask the tx download manager whether this tx should be validated now,
        // or whether it should instead be evaluated as part of a package.
        const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx);
        if (!should_validate) {
            if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) {
                // Always relay transactions received from peers with forcerelay
                // permission, even if they were already in the mempool, allowing
                // the node to function as a gateway for nodes hidden behind it.
                if (!m_mempool.exists(GenTxid::Txid(tx.GetHash()))) {
                    LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",
                              tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
                } else {
                    LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n",
                              tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
                    RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
                }
            }

            // Even though this tx isn't validated on its own, it may complete
            // a package (e.g. parent + child) that can be evaluated together.
            if (package_to_validate) {
                const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
                LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
                         package_result.m_state.IsValid() ? "package accepted" : "package rejected");
                ProcessPackageResult(package_to_validate.value(), package_result);
            }
            return;
        }

        // ReceivedTx should not be telling us to validate the tx and a package.
        Assume(!package_to_validate.has_value());

        const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
        const TxValidationState& state = result.m_state;

        if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
            ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions);
            pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
        }
        if (state.IsInvalid()) {
            // An invalid tx may still be retried as a package (e.g. if it was
            // rejected for missing inputs / low fee and a package is available).
            if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, /*first_time_failure=*/true)}) {
                const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
                LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
                         package_result.m_state.IsValid() ? "package accepted" : "package rejected");
                ProcessPackageResult(package_to_validate.value(), package_result);
            }
        }

        return;
    }
4346  
    // CMPCTBLOCK (BIP 152): the peer announces a block as a header plus short
    // transaction ids. Try to reconstruct the block from our mempool, request
    // the missing transactions via GETBLOCKTXN, or fall back to header/full
    // block processing.
    if (msg_type == NetMsgType::CMPCTBLOCK)
    {
        // Ignore cmpctblock received while importing
        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
            return;
        }

        CBlockHeaderAndShortTxIDs cmpctblock;
        vRecv >> cmpctblock;

        bool received_new_header = false;
        const auto blockhash = cmpctblock.header.GetHash();

        // Pre-check the header under cs_main before doing any expensive work.
        {
        LOCK(cs_main);

        const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock);
        if (!prev_block) {
            // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
            if (!m_chainman.IsInitialBlockDownload()) {
                MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
            }
            return;
        } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({{cmpctblock.header}}) < GetAntiDoSWorkThreshold()) {
            // If we get a low-work header in a compact block, we can ignore it.
            LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
            return;
        }

        if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
            received_new_header = true;
        }
        }

        const CBlockIndex *pindex = nullptr;
        BlockValidationState state;
        if (!m_chainman.ProcessNewBlockHeaders({{cmpctblock.header}}, /*min_pow_checked=*/true, state, &pindex)) {
            if (state.IsInvalid()) {
                MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
                return;
            }
        }

        // If AcceptBlockHeader returned true, it set pindex
        Assert(pindex);
        if (received_new_header) {
            LogBlockHeader(*pindex, pfrom, /*via_compact_block=*/true);
        }

        // Set if the block can be reconstructed immediately (no missing txns),
        // in which case we process it as a BLOCKTXN message below.
        bool fProcessBLOCKTXN = false;

        // If we end up treating this as a plain headers message, call that as well
        // without cs_main.
        bool fRevertToHeaderProcessing = false;

        // Keep a CBlock for "optimistic" compactblock reconstructions (see
        // below)
        std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
        bool fBlockReconstructed = false;

        {
        LOCK(cs_main);
        UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());

        CNodeState *nodestate = State(pfrom.GetId());

        // If this was a new header with more work than our tip, update the
        // peer's last block announcement time
        if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
            nodestate->m_last_block_announcement = GetTime();
        }

        if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
            return;

        // Check whether this block is already being downloaded, and from whom.
        auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
        size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
        bool requested_block_from_this_peer{false};

        // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
        bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());

        while (range_flight.first != range_flight.second) {
            if (range_flight.first->second.first == pfrom.GetId()) {
                requested_block_from_this_peer = true;
                break;
            }
            range_flight.first++;
        }

        if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
                pindex->nTx != 0) { // We had this block at some point, but pruned it
            if (requested_block_from_this_peer) {
                // We requested this block for some reason, but our mempool will probably be useless
                // so we just grab the block via normal getdata
                std::vector<CInv> vInv(1);
                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
            }
            return;
        }

        // If we're not close to tip yet, give up and let parallel block fetch work its magic
        if (!already_in_flight && !CanDirectFetch()) {
            return;
        }

        // We want to be a bit conservative just to be extra careful about DoS
        // possibilities in compact block processing...
        if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
            if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                 requested_block_from_this_peer) {
                std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
                if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
                    if (!(*queuedBlockIt)->partialBlock)
                        (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
                    else {
                        // The block was already in flight using compact blocks from the same peer
                        LogDebug(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
                        return;
                    }
                }

                // Fill in transactions we already have from the mempool/extra pool.
                PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
                ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
                if (status == READ_STATUS_INVALID) {
                    RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
                    Misbehaving(*peer, "invalid compact block");
                    return;
                } else if (status == READ_STATUS_FAILED) {
                    if (first_in_flight)  {
                        // Duplicate txindexes, the block is now in-flight, so just request it
                        std::vector<CInv> vInv(1);
                        vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
                        MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
                    } else {
                        // Give up for this peer and wait for other peer(s)
                        RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
                    }
                    return;
                }

                // Collect the indexes of the transactions we are still missing.
                BlockTransactionsRequest req;
                for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                    if (!partialBlock.IsTxAvailable(i))
                        req.indexes.push_back(i);
                }
                if (req.indexes.empty()) {
                    fProcessBLOCKTXN = true;
                } else if (first_in_flight) {
                    // We will try to round-trip any compact blocks we get on failure,
                    // as long as it's first...
                    req.blockhash = pindex->GetBlockHash();
                    MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
                } else if (pfrom.m_bip152_highbandwidth_to &&
                    (!pfrom.IsInboundConn() ||
                    IsBlockRequestedFromOutbound(blockhash) ||
                    already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
                    // ... or it's a hb relay peer and:
                    // - peer is outbound, or
                    // - we already have an outbound attempt in flight(so we'll take what we can get), or
                    // - it's not the final parallel download slot (which we may reserve for first outbound)
                    req.blockhash = pindex->GetBlockHash();
                    MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
                } else {
                    // Give up for this peer and wait for other peer(s)
                    RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
                }
            } else {
                // This block is either already in flight from a different
                // peer, or this peer has too many blocks outstanding to
                // download from.
                // Optimistically try to reconstruct anyway since we might be
                // able to without any round trips.
                PartiallyDownloadedBlock tempBlock(&m_mempool);
                ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
                if (status != READ_STATUS_OK) {
                    // TODO: don't ignore failures
                    return;
                }
                std::vector<CTransactionRef> dummy;
                status = tempBlock.FillBlock(*pblock, dummy);
                if (status == READ_STATUS_OK) {
                    fBlockReconstructed = true;
                }
            }
        } else {
            if (requested_block_from_this_peer) {
                // We requested this block, but its far into the future, so our
                // mempool will probably be useless - request the block normally
                std::vector<CInv> vInv(1);
                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
                return;
            } else {
                // If this was an announce-cmpctblock, we want the same treatment as a header message
                fRevertToHeaderProcessing = true;
            }
        }
        } // cs_main

        // All short ids resolved from our pools: process as if the peer had
        // sent an (empty) blocktxn response for this block.
        if (fProcessBLOCKTXN) {
            BlockTransactions txn;
            txn.blockhash = blockhash;
            return ProcessCompactBlockTxns(pfrom, *peer, txn);
        }

        if (fRevertToHeaderProcessing) {
            // Headers received from HB compact block peers are permitted to be
            // relayed before full validation (see BIP 152), so we don't want to disconnect
            // the peer if the header turns out to be for an invalid block.
            // Note that if a peer tries to build on an invalid chain, that
            // will be detected and the peer will be disconnected/discouraged.
            return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true);
        }

        if (fBlockReconstructed) {
            // If we got here, we were able to optimistically reconstruct a
            // block that is in flight from some other peer.
            {
                LOCK(cs_main);
                mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
            }
            // Setting force_processing to true means that we bypass some of
            // our anti-DoS protections in AcceptBlock, which filters
            // unrequested blocks that might be trying to waste our resources
            // (eg disk space). Because we only try to reconstruct blocks when
            // we're close to caught up (via the CanDirectFetch() requirement
            // above, combined with the behavior of not requesting blocks until
            // we have a chain with at least the minimum chain work), and we ignore
            // compact blocks with less work than our tip, it is safe to treat
            // reconstructed compact blocks as having been requested.
            ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
            LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
            if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
                // Clear download state for this block, which is in
                // process from some other peer.  We do this after calling
                // ProcessNewBlock so that a malleated cmpctblock announcement
                // can't be used to interfere with block relay.
                RemoveBlockRequest(pblock->GetHash(), std::nullopt);
            }
        }
        return;
    }
4592  
    // BLOCKTXN (BIP 152): the peer responds to our GETBLOCKTXN with the
    // transactions we were missing for a partially-downloaded compact block.
    if (msg_type == NetMsgType::BLOCKTXN)
    {
        // Ignore blocktxn received while importing
        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
            return;
        }

        BlockTransactions resp;
        vRecv >> resp;

        // Attempt block reconstruction with the received transactions.
        return ProcessCompactBlockTxns(pfrom, *peer, resp);
    }
4606  
    // HEADERS: the peer sends us a batch of block headers (in response to a
    // getheaders request, or as an announcement).
    if (msg_type == NetMsgType::HEADERS)
    {
        // Ignore headers received while importing
        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
            return;
        }

        std::vector<CBlockHeader> headers;

        // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
        unsigned int nCount = ReadCompactSize(vRecv);
        if (nCount > m_opts.max_headers_result) {
            // More headers than we would ever send in one message: misbehavior.
            Misbehaving(*peer, strprintf("headers message size = %u", nCount));
            return;
        }
        headers.resize(nCount);
        for (unsigned int n = 0; n < nCount; n++) {
            vRecv >> headers[n];
            ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
        }

        ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false);

        // Check if the headers presync progress needs to be reported to validation.
        // This needs to be done without holding the m_headers_presync_mutex lock.
        if (m_headers_presync_should_signal.exchange(false)) {
            HeadersPresyncStats stats;
            {
                LOCK(m_headers_presync_mutex);
                auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
                if (it != m_headers_presync_stats.end()) stats = it->second;
            }
            if (stats.second) {
                m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second);
            }
        }

        return;
    }
4647  
    // BLOCK: the peer sends us a full block (normally in response to getdata).
    if (msg_type == NetMsgType::BLOCK)
    {
        // Ignore block received while importing
        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
            return;
        }

        std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
        vRecv >> TX_WITH_WITNESS(*pblock);

        LogDebug(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());

        const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))};

        // Check for possible mutation if it connects to something we know so we can check for DEPLOYMENT_SEGWIT being active
        if (prev_block && IsBlockMutated(/*block=*/*pblock,
                           /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) {
            LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id);
            Misbehaving(*peer, "mutated block");
            // Drop the in-flight entry so the block can be re-requested from
            // another peer.
            WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id));
            return;
        }

        bool forceProcessing = false;
        const uint256 hash(pblock->GetHash());
        bool min_pow_checked = false;
        {
            LOCK(cs_main);
            // Always process the block if we requested it, since we may
            // need it even when it's not a candidate for a new best tip.
            forceProcessing = IsBlockRequested(hash);
            RemoveBlockRequest(hash, pfrom.GetId());
            // mapBlockSource is only used for punishing peers and setting
            // which peers send us compact blocks, so the race between here and
            // cs_main in ProcessNewBlock is fine.
            mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));

            // Check claimed work on this block against our anti-dos thresholds.
            if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({{pblock->GetBlockHeader()}}) >= GetAntiDoSWorkThreshold()) {
                min_pow_checked = true;
            }
        }
        ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
        return;
    }
4694  
    // GETADDR: the peer requests known network addresses. Respond at most once
    // per connection, and only on inbound connections.
    if (msg_type == NetMsgType::GETADDR) {
        // This asymmetric behavior for inbound and outbound connections was introduced
        // to prevent a fingerprinting attack: an attacker can send specific fake addresses
        // to users' AddrMan and later request them by sending getaddr messages.
        // Making nodes which are behind NAT and can only make outgoing connections ignore
        // the getaddr message mitigates the attack.
        if (!pfrom.IsInboundConn()) {
            LogDebug(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
            return;
        }

        // Since this must be an inbound connection, SetupAddressRelay will
        // never fail.
        Assume(SetupAddressRelay(pfrom, *peer));

        // Only send one GetAddr response per connection to reduce resource waste
        // and discourage addr stamping of INV announcements.
        if (peer->m_getaddr_recvd) {
            LogDebug(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
            return;
        }
        peer->m_getaddr_recvd = true;

        // Replace any pending address announcements with a fresh sample.
        peer->m_addrs_to_send.clear();
        std::vector<CAddress> vAddr;
        if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
            // Addr-permissioned peers get an unfiltered (no network restriction) sample.
            vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt);
        } else {
            vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
        }
        for (const CAddress &addr : vAddr) {
            PushAddress(*peer, addr);
        }
        return;
    }
4730  
    // MEMPOOL (BIP 35): the peer requests the contents of our mempool. Only
    // honored when we offer bloom services or the peer has mempool permission.
    if (msg_type == NetMsgType::MEMPOOL) {
        // Only process received mempool messages if we advertise NODE_BLOOM
        // or if the peer has mempool permissions.
        if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
        {
            if (!pfrom.HasPermission(NetPermissionFlags::NoBan))
            {
                LogDebug(BCLog::NET, "mempool request with bloom filters disabled, %s\n", pfrom.DisconnectMsg(fLogIPs));
                pfrom.fDisconnect = true;
            }
            return;
        }

        // Avoid serving large mempool responses once our outbound bandwidth
        // target is exhausted, unless the peer is mempool-permissioned.
        if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
        {
            if (!pfrom.HasPermission(NetPermissionFlags::NoBan))
            {
                LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs));
                pfrom.fDisconnect = true;
            }
            return;
        }

        // Flag the request; the actual response is assembled later in the
        // send path (SendMessages), not here.
        if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
            LOCK(tx_relay->m_tx_inventory_mutex);
            tx_relay->m_send_mempool = true;
        }
        return;
    }
4760  
    // PING: respond with a PONG echoing the nonce (for peers past BIP 31).
    if (msg_type == NetMsgType::PING) {
        if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
            uint64_t nonce = 0;
            vRecv >> nonce;
            // Echo the message back with the nonce. This allows for two useful features:
            //
            // 1) A remote node can quickly check if the connection is operational
            // 2) Remote nodes can measure the latency of the network thread. If this node
            //    is overloaded it won't respond to pings quickly and the remote node can
            //    avoid sending us more work, like chain download requests.
            //
            // The nonce stops the remote getting confused between different pings: without
            // it, if the remote node sends a ping once per second and this node takes 5
            // seconds to respond to each, the 5th ping the remote sends would appear to
            // return very quickly.
            MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce);
        }
        return;
    }
4780  
    // PONG: the peer answers our outstanding ping. Match the nonce against the
    // one we sent and record the measured round-trip time.
    if (msg_type == NetMsgType::PONG) {
        const auto ping_end = time_received;
        uint64_t nonce = 0;
        size_t nAvail = vRecv.in_avail();
        bool bPingFinished = false;
        std::string sProblem;

        if (nAvail >= sizeof(nonce)) {
            vRecv >> nonce;

            // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
            if (peer->m_ping_nonce_sent != 0) {
                if (nonce == peer->m_ping_nonce_sent) {
                    // Matching pong received, this ping is no longer outstanding
                    bPingFinished = true;
                    const auto ping_time = ping_end - peer->m_ping_start.load();
                    if (ping_time.count() >= 0) {
                        // Let connman know about this successful ping-pong
                        pfrom.PongReceived(ping_time);
                    } else {
                        // This should never happen
                        sProblem = "Timing mishap";
                    }
                } else {
                    // Nonce mismatches are normal when pings are overlapping
                    sProblem = "Nonce mismatch";
                    if (nonce == 0) {
                        // This is most likely a bug in another implementation somewhere; cancel this ping
                        bPingFinished = true;
                        sProblem = "Nonce zero";
                    }
                }
            } else {
                sProblem = "Unsolicited pong without ping";
            }
        } else {
            // This is most likely a bug in another implementation somewhere; cancel this ping
            bPingFinished = true;
            sProblem = "Short payload";
        }

        // Problems are logged for debugging only; no peer punishment here.
        if (!(sProblem.empty())) {
            LogDebug(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                pfrom.GetId(),
                sProblem,
                peer->m_ping_nonce_sent,
                nonce,
                nAvail);
        }
        if (bPingFinished) {
            // Clear the outstanding-ping marker so a new ping can be sent.
            peer->m_ping_nonce_sent = 0;
        }
        return;
    }
4835  
    // FILTERLOAD (BIP 37): the peer installs a bloom filter to restrict which
    // transactions we relay to it. Requires us to offer NODE_BLOOM.
    if (msg_type == NetMsgType::FILTERLOAD) {
        if (!(peer->m_our_services & NODE_BLOOM)) {
            LogDebug(BCLog::NET, "filterload received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }
        CBloomFilter filter;
        vRecv >> filter;

        if (!filter.IsWithinSizeConstraints())
        {
            // There is no excuse for sending a too-large filter
            Misbehaving(*peer, "too-large bloom filter");
        } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
            {
                // Replace any previously loaded filter and (re-)enable tx relay.
                LOCK(tx_relay->m_bloom_filter_mutex);
                tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
                tx_relay->m_relay_txs = true;
            }
            pfrom.m_bloom_filter_loaded = true;
            pfrom.m_relays_txs = true;
        }
        return;
    }
4860  
4861      if (msg_type == NetMsgType::FILTERADD) {
4862          if (!(peer->m_our_services & NODE_BLOOM)) {
4863              LogDebug(BCLog::NET, "filteradd received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
4864              pfrom.fDisconnect = true;
4865              return;
4866          }
4867          std::vector<unsigned char> vData;
4868          vRecv >> vData;
4869  
4870          // Nodes must NEVER send a data item > MAX_SCRIPT_ELEMENT_SIZE bytes (the max size for a script data object,
4871          // and thus, the maximum size any matched object can have) in a filteradd message
4872          bool bad = false;
4873          if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
4874              bad = true;
4875          } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4876              LOCK(tx_relay->m_bloom_filter_mutex);
4877              if (tx_relay->m_bloom_filter) {
4878                  tx_relay->m_bloom_filter->insert(vData);
4879              } else {
4880                  bad = true;
4881              }
4882          }
4883          if (bad) {
4884              Misbehaving(*peer, "bad filteradd message");
4885          }
4886          return;
4887      }
4888  
4889      if (msg_type == NetMsgType::FILTERCLEAR) {
4890          if (!(peer->m_our_services & NODE_BLOOM)) {
4891              LogDebug(BCLog::NET, "filterclear received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
4892              pfrom.fDisconnect = true;
4893              return;
4894          }
4895          auto tx_relay = peer->GetTxRelay();
4896          if (!tx_relay) return;
4897  
4898          {
4899              LOCK(tx_relay->m_bloom_filter_mutex);
4900              tx_relay->m_bloom_filter = nullptr;
4901              tx_relay->m_relay_txs = true;
4902          }
4903          pfrom.m_bloom_filter_loaded = false;
4904          pfrom.m_relays_txs = true;
4905          return;
4906      }
4907  
4908      if (msg_type == NetMsgType::FEEFILTER) {
4909          CAmount newFeeFilter = 0;
4910          vRecv >> newFeeFilter;
4911          if (MoneyRange(newFeeFilter)) {
4912              if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4913                  tx_relay->m_fee_filter_received = newFeeFilter;
4914              }
4915              LogDebug(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
4916          }
4917          return;
4918      }
4919  
4920      if (msg_type == NetMsgType::GETCFILTERS) {
4921          ProcessGetCFilters(pfrom, *peer, vRecv);
4922          return;
4923      }
4924  
4925      if (msg_type == NetMsgType::GETCFHEADERS) {
4926          ProcessGetCFHeaders(pfrom, *peer, vRecv);
4927          return;
4928      }
4929  
4930      if (msg_type == NetMsgType::GETCFCHECKPT) {
4931          ProcessGetCFCheckPt(pfrom, *peer, vRecv);
4932          return;
4933      }
4934  
4935      if (msg_type == NetMsgType::NOTFOUND) {
4936          std::vector<CInv> vInv;
4937          vRecv >> vInv;
4938          std::vector<uint256> tx_invs;
4939          if (vInv.size() <= node::MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4940              for (CInv &inv : vInv) {
4941                  if (inv.IsGenTxMsg()) {
4942                      tx_invs.emplace_back(inv.hash);
4943                  }
4944              }
4945          }
4946          LOCK(m_tx_download_mutex);
4947          m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs);
4948          return;
4949      }
4950  
4951      // Ignore unknown commands for extensibility
4952      LogDebug(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
4953      return;
4954  }
4955  
4956  bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
4957  {
4958      {
4959          LOCK(peer.m_misbehavior_mutex);
4960  
4961          // There's nothing to do if the m_should_discourage flag isn't set
4962          if (!peer.m_should_discourage) return false;
4963  
4964          peer.m_should_discourage = false;
4965      } // peer.m_misbehavior_mutex
4966  
4967      if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
4968          // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
4969          LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
4970          return false;
4971      }
4972  
4973      if (pnode.IsManualConn()) {
4974          // We never disconnect or discourage manual peers for bad behavior
4975          LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
4976          return false;
4977      }
4978  
4979      if (pnode.addr.IsLocal()) {
4980          // We disconnect local peers for bad behavior but don't discourage (since that would discourage
4981          // all peers on the same local address)
4982          LogDebug(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n",
4983                   pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
4984          pnode.fDisconnect = true;
4985          return true;
4986      }
4987  
4988      // Normal case: Disconnect the peer and discourage all nodes sharing the address
4989      LogDebug(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
4990      if (m_banman) m_banman->Discourage(pnode.addr);
4991      m_connman.DisconnectNode(pnode.addr);
4992      return true;
4993  }
4994  
// Process at most one unit of work for this peer: pending getdata responses,
// a queued orphan transaction reconsideration, or one newly received network
// message. Returns true if more work remains for this peer.
bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
    AssertLockNotHeld(m_tx_download_mutex);
    AssertLockHeld(g_msgproc_mutex);

    PeerRef peer = GetPeerRef(pfrom->GetId());
    if (peer == nullptr) return false;

    // For outbound connections, ensure that the initial VERSION message
    // has been sent first before processing any incoming messages
    if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false;

    // Serve any queued getdata requests before pulling a new message off the
    // socket, so responses keep their order relative to earlier requests.
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) {
            ProcessGetData(*pfrom, *peer, interruptMsgProc);
        }
    }

    const bool processed_orphan = ProcessOrphanTx(*peer);

    if (pfrom->fDisconnect)
        return false;

    // If an orphan was reconsidered, stop here and report more work available
    // rather than also processing a network message this call.
    if (processed_orphan) return true;

    // this maintains the order of responses
    // and prevents m_getdata_requests from growing unbounded
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) return true;
    }

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend) return false;

    auto poll_result{pfrom->PollMessage()};
    if (!poll_result) {
        // No message to process
        return false;
    }

    CNetMessage& msg{poll_result->first};
    bool fMoreWork = poll_result->second;

    TRACEPOINT(net, inbound_message,
        pfrom->GetId(),
        pfrom->m_addr_name.c_str(),
        pfrom->ConnectionTypeAsString().c_str(),
        msg.m_type.c_str(),
        msg.m_recv.size(),
        msg.m_recv.data()
    );

    if (m_opts.capture_messages) {
        CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
    }

    try {
        ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
        if (interruptMsgProc) return false;
        {
            LOCK(peer->m_getdata_requests_mutex);
            if (!peer->m_getdata_requests.empty()) fMoreWork = true;
        }
        // Does this peer have an orphan ready to reconsider?
        // (Note: we may have provided a parent for an orphan provided
        //  by another peer that was already processed; in that case,
        //  the extra work may not be noticed, possibly resulting in an
        //  unnecessary 100ms delay)
        LOCK(m_tx_download_mutex);
        if (m_txdownloadman.HaveMoreWork(peer->m_id)) fMoreWork = true;
    } catch (const std::exception& e) {
        // Swallow exceptions from message handling: a malformed message from
        // one peer must not take down the message-processing thread.
        LogDebug(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
    } catch (...) {
        LogDebug(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
    }

    return fMoreWork;
}
5075  
// Evaluate whether an outbound peer should be disconnected for serving a
// low-work chain: give it CHAIN_SYNC_TIMEOUT to announce a chain with at
// least as much work as our tip, then one getheaders nudge with
// HEADERS_RESPONSE_TIME to respond, and disconnect if it still lags.
void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
{
    AssertLockHeld(cs_main);

    CNodeState &state = *State(pto.GetId());

    // Only unprotected outbound (full- or block-relay) peers that we started
    // header sync with are subject to this check.
    if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
        // This is an outbound peer subject to disconnection if they don't
        // announce a block with as much work as the current tip within
        // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
        // their chain has more work than ours, we should sync to it,
        // unless it's invalid, in which case we should find that out and
        // disconnect from them elsewhere).
        if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
            // The outbound peer has sent us a block with at least as much work as our current tip, so reset the timeout if it was set
            if (state.m_chain_sync.m_timeout != 0s) {
                state.m_chain_sync.m_timeout = 0s;
                state.m_chain_sync.m_work_header = nullptr;
                state.m_chain_sync.m_sent_getheaders = false;
            }
        } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
            // At this point we know that the outbound peer has either never sent us a block/header or they have, but its tip is behind ours
            // AND
            // we are noticing this for the first time (m_timeout is 0)
            // OR we noticed this at some point within the last CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds and set a timeout
            // for them, they caught up to our tip at the time of setting the timer but not to our current one (we've also advanced).
            // Either way, set a new timeout based on our current tip.
            state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
            state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
            state.m_chain_sync.m_sent_getheaders = false;
        } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
            // No evidence yet that our peer has synced to a chain with work equal to that
            // of our tip, when we first detected it was behind. Send a single getheaders
            // message to give the peer a chance to update us.
            if (state.m_chain_sync.m_sent_getheaders) {
                // They've run out of time to catch up!
                LogInfo("Outbound peer has old chain, best known block = %s, %s\n", state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", pto.DisconnectMsg(fLogIPs));
                pto.fDisconnect = true;
            } else {
                assert(state.m_chain_sync.m_work_header);
                // Here, we assume that the getheaders message goes out,
                // because it'll either go out or be skipped because of a
                // getheaders in-flight already, in which case the peer should
                // still respond to us with a sufficiently high work chain tip.
                MaybeSendGetHeaders(pto,
                        GetLocator(state.m_chain_sync.m_work_header->pprev),
                        peer);
                LogDebug(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
                state.m_chain_sync.m_sent_getheaders = true;
                // Bump the timeout to allow a response, which could clear the timeout
                // (if the response shows the peer has synced), reset the timeout (if
                // the peer syncs to the required work but not to our tip), or result
                // in disconnect (if we advance to the timeout and pindexBestKnownBlock
                // has not sufficiently progressed)
                state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
            }
        }
    }
}
5135  
// If we have more outbound connections than our targets allow, select and
// disconnect one extra block-relay-only peer and/or one extra
// outbound-full-relay peer. Caller must hold cs_main (State() access).
void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
{
    // If we have any extra block-relay-only peers, disconnect the youngest unless
    // it's given us a block -- in which case, compare with the second-youngest, and
    // out of those two, disconnect the peer who least recently gave us a block.
    // The youngest block-relay-only peer would be the extra peer we connected
    // to temporarily in order to sync our tip; see net.cpp.
    // Note that we use higher nodeid as a measure for most recent connection.
    if (m_connman.GetExtraBlockRelayCount() > 0) {
        // pair of (node id, last-block-received time) for the two most
        // recently established block-relay-only connections
        std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};

        m_connman.ForEachNode([&](CNode* pnode) {
            // Skip non-block-relay peers and those already being disconnected
            if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
            if (pnode->GetId() > youngest_peer.first) {
                next_youngest_peer = youngest_peer;
                youngest_peer.first = pnode->GetId();
                youngest_peer.second = pnode->m_last_block_time;
            }
        });
        NodeId to_disconnect = youngest_peer.first;
        if (youngest_peer.second > next_youngest_peer.second) {
            // Our newest block-relay-only peer gave us a block more recently;
            // disconnect our second youngest.
            to_disconnect = next_youngest_peer.first;
        }
        m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            AssertLockHeld(::cs_main);
            // Make sure we're not getting a block right now, and that
            // we've been connected long enough for this eviction to happen
            // at all.
            // Note that we only request blocks from a peer if we learn of a
            // valid headers chain with at least as much work as our tip.
            CNodeState *node_state = State(pnode->GetId());
            if (node_state == nullptr ||
                (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) {
                pnode->fDisconnect = true;
                LogDebug(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
                         pnode->GetId(), count_seconds(pnode->m_last_block_time));
                return true;
            } else {
                LogDebug(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
                         pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size());
            }
            return false;
        });
    }

    // Check whether we have too many outbound-full-relay peers
    if (m_connman.GetExtraFullOutboundCount() > 0) {
        // If we have more outbound-full-relay peers than we target, disconnect one.
        // Pick the outbound-full-relay peer that least recently announced
        // us a new block, with ties broken by choosing the more recent
        // connection (higher node id)
        // Protect peers from eviction if we don't have another connection
        // to their network, counting both outbound-full-relay and manual peers.
        NodeId worst_peer = -1;
        int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();

        m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) {
            AssertLockHeld(::cs_main);

            // Only consider outbound-full-relay peers that are not already
            // marked for disconnection
            if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
            CNodeState *state = State(pnode->GetId());
            if (state == nullptr) return; // shouldn't be possible, but just in case
            // Don't evict our protected peers
            if (state->m_chain_sync.m_protect) return;
            // If this is the only connection on a particular network that is
            // OUTBOUND_FULL_RELAY or MANUAL, protect it.
            if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return;
            if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
                worst_peer = pnode->GetId();
                oldest_block_announcement = state->m_last_block_announcement;
            }
        });
        if (worst_peer != -1) {
            bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
                AssertLockHeld(::cs_main);

                // Only disconnect a peer that has been connected to us for
                // some reasonable fraction of our check-frequency, to give
                // it time for new information to have arrived.
                // Also don't disconnect any peer we're trying to download a
                // block from.
                CNodeState &state = *State(pnode->GetId());
                if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) {
                    LogDebug(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
                    pnode->fDisconnect = true;
                    return true;
                } else {
                    LogDebug(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
                             pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size());
                    return false;
                }
            });
            if (disconnected) {
                // If we disconnected an extra peer, that means we successfully
                // connected to at least one peer after the last time we
                // detected a stale tip. Don't try any more extra peers until
                // we next detect a stale tip, to limit the load we put on the
                // network from these extra connections.
                m_connman.SetTryNewOutboundPeer(false);
            }
        }
    }
}
5243  
5244  void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
5245  {
5246      LOCK(cs_main);
5247  
5248      auto now{GetTime<std::chrono::seconds>()};
5249  
5250      EvictExtraOutboundPeers(now);
5251  
5252      if (now > m_stale_tip_check_time) {
5253          // Check whether our tip is stale, and if so, allow using an extra
5254          // outbound peer
5255          if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
5256              LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
5257                        count_seconds(now - m_last_tip_update.load()));
5258              m_connman.SetTryNewOutboundPeer(true);
5259          } else if (m_connman.GetTryNewOutboundPeer()) {
5260              m_connman.SetTryNewOutboundPeer(false);
5261          }
5262          m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
5263      }
5264  
5265      if (!m_initial_sync_finished && CanDirectFetch()) {
5266          m_connman.StartExtraBlockRelayPeers();
5267          m_initial_sync_finished = true;
5268      }
5269  }
5270  
5271  void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
5272  {
5273      if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
5274          peer.m_ping_nonce_sent &&
5275          now > peer.m_ping_start.load() + TIMEOUT_INTERVAL)
5276      {
5277          // The ping timeout is using mocktime. To disable the check during
5278          // testing, increase -peertimeout.
5279          LogDebug(BCLog::NET, "ping timeout: %fs, %s", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), node_to.DisconnectMsg(fLogIPs));
5280          node_to.fDisconnect = true;
5281          return;
5282      }
5283  
5284      bool pingSend = false;
5285  
5286      if (peer.m_ping_queued) {
5287          // RPC ping request by user
5288          pingSend = true;
5289      }
5290  
5291      if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
5292          // Ping automatically sent as a latency probe & keepalive.
5293          pingSend = true;
5294      }
5295  
5296      if (pingSend) {
5297          uint64_t nonce;
5298          do {
5299              nonce = FastRandomContext().rand64();
5300          } while (nonce == 0);
5301          peer.m_ping_queued = false;
5302          peer.m_ping_start = now;
5303          if (node_to.GetCommonVersion() > BIP0031_VERSION) {
5304              peer.m_ping_nonce_sent = nonce;
5305              MakeAndPushMessage(node_to, NetMsgType::PING, nonce);
5306          } else {
5307              // Peer is too old to support ping command with nonce, pong will never arrive.
5308              peer.m_ping_nonce_sent = 0;
5309              MakeAndPushMessage(node_to, NetMsgType::PING);
5310          }
5311      }
5312  }
5313  
// Periodically flush queued address announcements to this peer, and
// occasionally self-announce our own local address.
void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
{
    // Nothing to do for non-address-relay peers
    if (!peer.m_addr_relay_enabled) return;

    LOCK(peer.m_addr_send_times_mutex);
    // Periodically advertise our local address to the peer.
    if (fListen && !m_chainman.IsInitialBlockDownload() &&
        peer.m_next_local_addr_send < current_time) {
        // If we've sent before, clear the bloom filter for the peer, so that our
        // self-announcement will actually go out.
        // This might be unnecessary if the bloom filter has already rolled
        // over since our last self-announcement, but there is only a small
        // bandwidth cost that we can incur by doing this (which happens
        // once a day on average).
        if (peer.m_next_local_addr_send != 0us) {
            peer.m_addr_known->reset();
        }
        if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
            CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
            PushAddress(peer, local_addr);
        }
        // Schedule the next self-announcement at a randomized interval.
        peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
    }

    // We sent an `addr` message to this peer recently. Nothing more to do.
    if (current_time <= peer.m_next_addr_send) return;

    peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);

    if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
        // Should be impossible since we always check size before adding to
        // m_addrs_to_send. Recover by trimming the vector.
        peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
    }

    // Remove addr records that the peer already knows about, and add new
    // addrs to the m_addr_known filter on the same pass.
    auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
        bool ret = peer.m_addr_known->contains(addr.GetKey());
        if (!ret) peer.m_addr_known->insert(addr.GetKey());
        return ret;
    };
    peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
                           peer.m_addrs_to_send.end());

    // No addr messages to send
    if (peer.m_addrs_to_send.empty()) return;

    if (peer.m_wants_addrv2) {
        MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send));
    } else {
        MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send));
    }
    peer.m_addrs_to_send.clear();

    // we only send the big addr message once; afterwards release the large
    // buffer's capacity since typical sends are far smaller
    if (peer.m_addrs_to_send.capacity() > 40) {
        peer.m_addrs_to_send.shrink_to_fit();
    }
}
5375  
5376  void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer)
5377  {
5378      // Delay sending SENDHEADERS (BIP 130) until we're done with an
5379      // initial-headers-sync with this peer. Receiving headers announcements for
5380      // new blocks while trying to sync their headers chain is problematic,
5381      // because of the state tracking done.
5382      if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) {
5383          LOCK(cs_main);
5384          CNodeState &state = *State(node.GetId());
5385          if (state.pindexBestKnownBlock != nullptr &&
5386                  state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) {
5387              // Tell our peer we prefer to receive headers rather than inv's
5388              // We send this to non-NODE NETWORK peers as well, because even
5389              // non-NODE NETWORK peers can announce blocks (such as pruning
5390              // nodes)
5391              MakeAndPushMessage(node, NetMsgType::SENDHEADERS);
5392              peer.m_sent_sendheaders = true;
5393          }
5394      }
5395  }
5396  
// Periodically send a BIP 133 feefilter message announcing the minimum
// feerate of transactions we want announced to us, so peers can skip
// relaying cheaper transactions.
void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
{
    // In -blocksonly mode we ignore incoming txs anyway, so no filter needed
    if (m_opts.ignore_incoming_txs) return;
    // Peer predates BIP 133 support
    if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
    // peers with the forcerelay permission should not filter txs to us
    if (pto.HasPermission(NetPermissionFlags::ForceRelay)) return;
    // Don't send feefilter messages to outbound block-relay-only peers since they should never announce
    // transactions to us, regardless of feefilter state.
    if (pto.IsBlockOnlyConn()) return;

    CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK();

    if (m_chainman.IsInitialBlockDownload()) {
        // Received tx-inv messages are discarded when the active
        // chainstate is in IBD, so tell the peer to not send them.
        currentFilter = MAX_MONEY;
    } else {
        // Computed once on first use (function-local static).
        static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
        if (peer.m_fee_filter_sent == MAX_FILTER) {
            // Send the current filter if we sent MAX_FILTER previously
            // and made it out of IBD.
            peer.m_next_send_feefilter = 0us;
        }
    }
    if (current_time > peer.m_next_send_feefilter) {
        CAmount filterToSend = m_fee_filter_rounder.round(currentFilter);
        // We always have a fee filter of at least the min relay fee
        filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK());
        if (filterToSend != peer.m_fee_filter_sent) {
            // Only transmit when the rounded value actually changed.
            MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
            peer.m_fee_filter_sent = filterToSend;
        }
        peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
    }
    // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
    // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
    else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter &&
                (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
        peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
    }
}
5438  
5439  namespace {
5440  class CompareInvMempoolOrder
5441  {
5442      CTxMemPool* mp;
5443      bool m_wtxid_relay;
5444  public:
5445      explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
5446      {
5447          mp = _mempool;
5448          m_wtxid_relay = use_wtxid;
5449      }
5450  
5451      bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
5452      {
5453          /* As std::make_heap produces a max-heap, we want the entries with the
5454           * fewest ancestors/highest fee to sort later. */
5455          return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
5456      }
5457  };
5458  } // namespace
5459  
5460  bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
5461  {
5462      // block-relay-only peers may never send txs to us
5463      if (peer.IsBlockOnlyConn()) return true;
5464      if (peer.IsFeelerConn()) return true;
5465      // In -blocksonly mode, peers need the 'relay' permission to send txs to us
5466      if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true;
5467      return false;
5468  }
5469  
5470  bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
5471  {
5472      // We don't participate in addr relay with outbound block-relay-only
5473      // connections to prevent providing adversaries with the additional
5474      // information of addr traffic to infer the link.
5475      if (node.IsBlockOnlyConn()) return false;
5476  
5477      if (!peer.m_addr_relay_enabled.exchange(true)) {
5478          // During version message processing (non-block-relay-only outbound peers)
5479          // or on first addr-related message we have received (inbound peers), initialize
5480          // m_addr_known.
5481          peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
5482      }
5483  
5484      return true;
5485  }
5486  
/** Per-peer send loop, run on the message-handler thread (g_msgproc_mutex held).
 *
 * Drives everything this node proactively sends to one peer: the outbound
 * VERSION handshake, pings, addr relay, SENDHEADERS, initial headers sync,
 * block announcements (headers / compact block / inv), transaction inventory
 * trickling, feefilter, and GETDATA requests for blocks and transactions.
 * Also enforces stalling/timeout disconnection logic for the peer.
 *
 * @param[in] pto  The peer to send messages to. Must be non-null.
 * @return false only if no Peer object exists for this node id (peer already
 *         destructed); true otherwise, including when the peer was marked for
 *         disconnect during this call.
 */
bool PeerManagerImpl::SendMessages(CNode* pto)
{
    AssertLockNotHeld(m_tx_download_mutex);
    AssertLockHeld(g_msgproc_mutex);

    PeerRef peer = GetPeerRef(pto->GetId());
    if (!peer) return false;
    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();

    // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
    // disconnect misbehaving peers even before the version handshake is complete.
    if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;

    // Initiate version handshake for outbound connections
    if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) {
        PushNodeVersion(*pto, *peer);
        peer->m_outbound_version_message_sent = true;
    }

    // Don't send anything until the version handshake is complete
    if (!pto->fSuccessfullyConnected || pto->fDisconnect)
        return true;

    const auto current_time{GetTime<std::chrono::microseconds>()};

    // Give up on addr-fetch peers that haven't served their purpose in time.
    if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
        LogDebug(BCLog::NET, "addrfetch connection timeout, %s\n", pto->DisconnectMsg(fLogIPs));
        pto->fDisconnect = true;
        return true;
    }

    MaybeSendPing(*pto, *peer, current_time);

    // MaybeSendPing may have marked peer for disconnection
    if (pto->fDisconnect) return true;

    MaybeSendAddr(*pto, *peer, current_time);

    MaybeSendSendHeaders(*pto, *peer);

    {
        LOCK(cs_main);

        CNodeState &state = *State(pto->GetId());

        // Start block sync
        if (m_chainman.m_best_header == nullptr) {
            m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
        }

        // Determine whether we might try initial headers sync or parallel
        // block download from this peer -- this mostly affects behavior while
        // in IBD (once out of IBD, we sync from all peers).
        bool sync_blocks_and_headers_from_peer = false;
        if (state.fPreferredDownload) {
            sync_blocks_and_headers_from_peer = true;
        } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
            // Typically this is an inbound peer. If we don't have any outbound
            // peers, or if we aren't downloading any blocks from such peers,
            // then allow block downloads from this peer, too.
            // We prefer downloading blocks from outbound peers to avoid
            // putting undue load on (say) some home user who is just making
            // outbound connections to the network, but if our only source of
            // the latest blocks is from an inbound peer, we have to be sure to
            // eventually download it (and not just wait indefinitely for an
            // outbound peer to have it).
            if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) {
                sync_blocks_and_headers_from_peer = true;
            }
        }

        if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
            // Only actively request headers from a single peer, unless we're close to today.
            if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) {
                const CBlockIndex* pindexStart = m_chainman.m_best_header;
                /* If possible, start at the block preceding the currently
                   best known header.  This ensures that we always get a
                   non-empty list of headers back as long as the peer
                   is up-to-date.  With a non-empty response, we can initialise
                   the peer's known best block.  This wouldn't be possible
                   if we requested starting at m_chainman.m_best_header and
                   got back an empty response.  */
                if (pindexStart->pprev)
                    pindexStart = pindexStart->pprev;
                if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
                    LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);

                    state.fSyncStarted = true;
                    peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
                        (
                         // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
                         // to maintain precision
                         std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
                         Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
                        );
                    nSyncStarted++;
                }
            }
        }

        //
        // Try sending block announcements via headers
        //
        {
            // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our
            // list of block hashes we're relaying, and our peer wants
            // headers announcements, then find the first header
            // not yet known to our peer but would connect, and send.
            // If no header would connect, or if we have too many
            // blocks, or if the peer doesn't want headers, just
            // add all to the inv queue.
            LOCK(peer->m_block_inv_mutex);
            std::vector<CBlock> vHeaders;
            bool fRevertToInv = ((!peer->m_prefers_headers &&
                                 (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
                                 peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
            const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
            ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date

            if (!fRevertToInv) {
                bool fFoundStartingHeader = false;
                // Try to find first header that our peer doesn't have, and
                // then send all headers past that one.  If we come across any
                // headers that aren't on m_chainman.ActiveChain(), give up.
                for (const uint256& hash : peer->m_blocks_for_headers_relay) {
                    const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
                    assert(pindex);
                    if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
                        // Bail out if we reorged away from this block
                        fRevertToInv = true;
                        break;
                    }
                    if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
                        // This means that the list of blocks to announce don't
                        // connect to each other.
                        // This shouldn't really be possible to hit during
                        // regular operation (because reorgs should take us to
                        // a chain that has some block not on the prior chain,
                        // which should be caught by the prior check), but one
                        // way this could happen is by using invalidateblock /
                        // reconsiderblock repeatedly on the tip, causing it to
                        // be added multiple times to m_blocks_for_headers_relay.
                        // Robustly deal with this rare situation by reverting
                        // to an inv.
                        fRevertToInv = true;
                        break;
                    }
                    pBestIndex = pindex;
                    if (fFoundStartingHeader) {
                        // add this to the headers message
                        vHeaders.emplace_back(pindex->GetBlockHeader());
                    } else if (PeerHasHeader(&state, pindex)) {
                        continue; // keep looking for the first new block
                    } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
                        // Peer doesn't have this header but they do have the prior one.
                        // Start sending headers.
                        fFoundStartingHeader = true;
                        vHeaders.emplace_back(pindex->GetBlockHeader());
                    } else {
                        // Peer doesn't have this header or the prior one -- nothing will
                        // connect, so bail out.
                        fRevertToInv = true;
                        break;
                    }
                }
            }
            if (!fRevertToInv && !vHeaders.empty()) {
                if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
                    // We only send up to 1 block as header-and-ids, as otherwise
                    // probably means we're doing an initial-ish-sync or they're slow
                    LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
                            vHeaders.front().GetHash().ToString(), pto->GetId());

                    std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
                    {
                        LOCK(m_most_recent_block_mutex);
                        // Reuse the pre-serialized compact block if it matches,
                        // avoiding a disk read and re-serialization.
                        if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
                            cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block);
                        }
                    }
                    if (cached_cmpctblock_msg.has_value()) {
                        PushMessage(*pto, std::move(cached_cmpctblock_msg.value()));
                    } else {
                        CBlock block;
                        const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)};
                        assert(ret);
                        CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()};
                        MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock);
                    }
                    state.pindexBestHeaderSent = pBestIndex;
                } else if (peer->m_prefers_headers) {
                    if (vHeaders.size() > 1) {
                        LogDebug(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
                                vHeaders.size(),
                                vHeaders.front().GetHash().ToString(),
                                vHeaders.back().GetHash().ToString(), pto->GetId());
                    } else {
                        LogDebug(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
                                vHeaders.front().GetHash().ToString(), pto->GetId());
                    }
                    MakeAndPushMessage(*pto, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
                    state.pindexBestHeaderSent = pBestIndex;
                } else
                    fRevertToInv = true;
            }
            if (fRevertToInv) {
                // If falling back to using an inv, just try to inv the tip.
                // The last entry in m_blocks_for_headers_relay was our tip at some point
                // in the past.
                if (!peer->m_blocks_for_headers_relay.empty()) {
                    const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back();
                    const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
                    assert(pindex);

                    // Warn if we're announcing a block that is not on the main chain.
                    // This should be very rare and could be optimized out.
                    // Just log for now.
                    if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
                        LogDebug(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
                            hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString());
                    }

                    // If the peer's chain has this block, don't inv it back.
                    if (!PeerHasHeader(&state, pindex)) {
                        peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
                        LogDebug(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
                            pto->GetId(), hashToAnnounce.ToString());
                    }
                }
            }
            // Every queued announcement has now been handled (as a header,
            // compact block, or inv) or deliberately dropped.
            peer->m_blocks_for_headers_relay.clear();
        }

        //
        // Message: inventory
        //
        std::vector<CInv> vInv;
        {
            LOCK(peer->m_block_inv_mutex);
            vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET));

            // Add blocks
            for (const uint256& hash : peer->m_blocks_for_inv_relay) {
                vInv.emplace_back(MSG_BLOCK, hash);
                if (vInv.size() == MAX_INV_SZ) {
                    MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
                    vInv.clear();
                }
            }
            peer->m_blocks_for_inv_relay.clear();
        }

        if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
                LOCK(tx_relay->m_tx_inventory_mutex);
                // Check whether periodic sends should happen
                bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
                if (tx_relay->m_next_inv_send_time < current_time) {
                    fSendTrickle = true;
                    if (pto->IsInboundConn()) {
                        tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
                    } else {
                        tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
                    }
                }

                // Time to send but the peer has requested we not relay transactions.
                if (fSendTrickle) {
                    LOCK(tx_relay->m_bloom_filter_mutex);
                    if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear();
                }

                // Respond to BIP35 mempool requests
                if (fSendTrickle && tx_relay->m_send_mempool) {
                    auto vtxinfo = m_mempool.infoAll();
                    tx_relay->m_send_mempool = false;
                    const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};

                    LOCK(tx_relay->m_bloom_filter_mutex);

                    for (const auto& txinfo : vtxinfo) {
                        CInv inv{
                            peer->m_wtxid_relay ? MSG_WTX : MSG_TX,
                            peer->m_wtxid_relay ?
                                txinfo.tx->GetWitnessHash().ToUint256() :
                                txinfo.tx->GetHash().ToUint256(),
                        };
                        tx_relay->m_tx_inventory_to_send.erase(inv.hash);

                        // Don't send transactions that peers will not put into their mempool
                        if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
                            continue;
                        }
                        if (tx_relay->m_bloom_filter) {
                            if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                        }
                        tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
                        vInv.push_back(inv);
                        if (vInv.size() == MAX_INV_SZ) {
                            MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
                            vInv.clear();
                        }
                    }
                }

                // Determine transactions to relay
                if (fSendTrickle) {
                    // Produce a vector with all candidates for sending
                    std::vector<std::set<uint256>::iterator> vInvTx;
                    vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
                    for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) {
                        vInvTx.push_back(it);
                    }
                    const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
                    // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
                    // A heap is used so that not all items need sorting if only a few are being sent.
                    CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay);
                    std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                    // No reason to drain out at many times the network's capacity,
                    // especially since we have many peers and some will draw much shorter delays.
                    unsigned int nRelayedTransactions = 0;
                    LOCK(tx_relay->m_bloom_filter_mutex);
                    // Scale the announcement budget with the backlog (5 extra per
                    // 1000 queued), capped at INVENTORY_BROADCAST_MAX.
                    size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5};
                    broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max);
                    while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) {
                        // Fetch the top element from the heap
                        std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                        std::set<uint256>::iterator it = vInvTx.back();
                        vInvTx.pop_back();
                        uint256 hash = *it;
                        CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
                        // Remove it from the to-be-sent set
                        tx_relay->m_tx_inventory_to_send.erase(it);
                        // Check if not in the filter already
                        if (tx_relay->m_tx_inventory_known_filter.contains(hash)) {
                            continue;
                        }
                        // Not in the mempool anymore? don't bother sending it.
                        auto txinfo = m_mempool.info(ToGenTxid(inv));
                        if (!txinfo.tx) {
                            continue;
                        }
                        // Peer told you to not send transactions at that feerate? Don't bother sending it.
                        if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
                            continue;
                        }
                        if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                        // Send
                        vInv.push_back(inv);
                        nRelayedTransactions++;
                        if (vInv.size() == MAX_INV_SZ) {
                            MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
                            vInv.clear();
                        }
                        tx_relay->m_tx_inventory_known_filter.insert(hash);
                    }

                    // Ensure we'll respond to GETDATA requests for anything we've just announced
                    LOCK(m_mempool.cs);
                    tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
                }
        }
        // Flush any remaining queued invs (blocks and/or transactions).
        if (!vInv.empty())
            MakeAndPushMessage(*pto, NetMsgType::INV, vInv);

        // Detect whether we're stalling
        auto stalling_timeout = m_block_stalling_timeout.load();
        if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
            // Stalling only triggers when the block download window cannot move. During normal steady state,
            // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
            // should only happen during initial block download.
            LogInfo("Peer is stalling block download, %s\n", pto->DisconnectMsg(fLogIPs));
            pto->fDisconnect = true;
            // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
            // bandwidth is insufficient.
            const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
            if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
                LogDebug(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
            }
            return true;
        }
        // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
        // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
        // We compensate for other peers to prevent killing off peers due to our own downstream link
        // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
        // to unreasonably increase our timeout.
        if (state.vBlocksInFlight.size() > 0) {
            QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
            int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
            if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
                LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs));
                pto->fDisconnect = true;
                return true;
            }
        }
        // Check for headers sync timeouts
        if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
            // Detect whether this is a stalling initial-headers-sync peer
            if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) {
                if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
                    // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
                    // and we have others we could be using instead.
                    // Note: If all our peers are inbound, then we won't
                    // disconnect our sync peer for stalling; we have bigger
                    // problems if we can't get any outbound peers.
                    if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
                        LogInfo("Timeout downloading headers, %s\n", pto->DisconnectMsg(fLogIPs));
                        pto->fDisconnect = true;
                        return true;
                    } else {
                        LogInfo("Timeout downloading headers from noban peer, not %s\n", pto->DisconnectMsg(fLogIPs));
                        // Reset the headers sync state so that we have a
                        // chance to try downloading from a different peer.
                        // Note: this will also result in at least one more
                        // getheaders message to be sent to
                        // this peer (eventually).
                        state.fSyncStarted = false;
                        nSyncStarted--;
                        peer->m_headers_sync_timeout = 0us;
                    }
                }
            } else {
                // After we've caught up once, reset the timeout so we can't trigger
                // disconnect later.
                peer->m_headers_sync_timeout = std::chrono::microseconds::max();
            }
        }

        // Check that outbound peers have reasonable chains
        // GetTime() is used by this anti-DoS logic so we can test this using mocktime
        ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());

        //
        // Message: getdata (blocks)
        //
        std::vector<CInv> vGetData;
        if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            std::vector<const CBlockIndex*> vToDownload;
            NodeId staller = -1;
            // Remaining request slots for this peer (never negative).
            auto get_inflight_budget = [&state]() {
                return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size()));
            };

            // If a snapshot chainstate is in use, we want to find its next blocks
            // before the background chainstate to prioritize getting to network tip.
            FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller);
            if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) {
                // If the background tip is not an ancestor of the snapshot block,
                // we need to start requesting blocks from their last common ancestor.
                const CBlockIndex *from_tip = LastCommonAncestor(m_chainman.GetBackgroundSyncTip(), m_chainman.GetSnapshotBaseBlock());
                TryDownloadingHistoricalBlocks(
                    *peer,
                    get_inflight_budget(),
                    vToDownload, from_tip,
                    Assert(m_chainman.GetSnapshotBaseBlock()));
            }
            for (const CBlockIndex *pindex : vToDownload) {
                uint32_t nFetchFlags = GetFetchFlags(*peer);
                vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
                BlockRequested(pto->GetId(), *pindex);
                LogDebug(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                    pindex->nHeight, pto->GetId());
            }
            if (state.vBlocksInFlight.empty() && staller != -1) {
                if (State(staller)->m_stalling_since == 0us) {
                    State(staller)->m_stalling_since = current_time;
                    LogDebug(BCLog::NET, "Stall started peer=%d\n", staller);
                }
            }
        }

        //
        // Message: getdata (transactions)
        //
        {
            LOCK(m_tx_download_mutex);
            for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(pto->GetId(), current_time)) {
                vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
                if (vGetData.size() >= MAX_GETDATA_SZ) {
                    MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
                    vGetData.clear();
                }
            }
        }

        // Flush any remaining getdata entries accumulated above.
        if (!vGetData.empty())
            MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
    } // release cs_main
    // Sent after cs_main is released; only reads mempool/peer state.
    MaybeSendFeefilter(*pto, *peer, current_time);
    return true;
}