// src/net.cpp
   1  // Copyright (c) 2009-2010 Satoshi Nakamoto
   2  // Copyright (c) 2009-2022 The Bitcoin Core developers
   3  // Distributed under the MIT software license, see the accompanying
   4  // file COPYING or http://www.opensource.org/licenses/mit-license.php.
   5  
   6  #if defined(HAVE_CONFIG_H)
   7  #include <config/bitcoin-config.h>
   8  #endif
   9  
  10  #include <net.h>
  11  
  12  #include <addrdb.h>
  13  #include <addrman.h>
  14  #include <banman.h>
  15  #include <clientversion.h>
  16  #include <common/args.h>
  17  #include <compat/compat.h>
  18  #include <consensus/consensus.h>
  19  #include <crypto/sha256.h>
  20  #include <i2p.h>
  21  #include <key.h>
  22  #include <logging.h>
  23  #include <memusage.h>
  24  #include <net_permissions.h>
  25  #include <netaddress.h>
  26  #include <netbase.h>
  27  #include <node/eviction.h>
  28  #include <node/interface_ui.h>
  29  #include <protocol.h>
  30  #include <random.h>
  31  #include <scheduler.h>
  32  #include <util/fs.h>
  33  #include <util/sock.h>
  34  #include <util/strencodings.h>
  35  #include <util/thread.h>
  36  #include <util/threadinterrupt.h>
  37  #include <util/trace.h>
  38  #include <util/translation.h>
  39  #include <util/vector.h>
  40  
  41  #ifdef WIN32
  42  #include <string.h>
  43  #endif
  44  
  45  #if HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS
  46  #include <ifaddrs.h>
  47  #endif
  48  
  49  #include <algorithm>
  50  #include <array>
  51  #include <cstdint>
  52  #include <functional>
  53  #include <optional>
  54  #include <unordered_map>
  55  
  56  #include <math.h>
  57  
/** Maximum number of block-relay-only anchor connections */
static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2;
static_assert (MAX_BLOCK_RELAY_ONLY_ANCHORS <= static_cast<size_t>(MAX_BLOCK_RELAY_ONLY_CONNECTIONS), "MAX_BLOCK_RELAY_ONLY_ANCHORS must not exceed MAX_BLOCK_RELAY_ONLY_CONNECTIONS.");
/** Anchor IP address database file name */
const char* const ANCHORS_DATABASE_FILENAME = "anchors.dat";

// How often to dump addresses to peers.dat
static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15};

/** Number of DNS seeds to query when the number of connections is low. */
static constexpr int DNSSEEDS_TO_QUERY_AT_ONCE = 3;

/** How long to delay before querying DNS seeds
 *
 * If we have more than THRESHOLD entries in addrman, then it's likely
 * that we got those addresses from having previously connected to the P2P
 * network, and that we'll be able to successfully reconnect to the P2P
 * network via contacting one of them. So if that's the case, spend a
 * little longer trying to connect to known peers before querying the
 * DNS seeds.
 */
static constexpr std::chrono::seconds DNSSEEDS_DELAY_FEW_PEERS{11};
static constexpr std::chrono::minutes DNSSEEDS_DELAY_MANY_PEERS{5};
static constexpr int DNSSEEDS_DELAY_PEER_THRESHOLD = 1000; // "many" vs "few" peers

/** The default timeframe for -maxuploadtarget. 1 day. */
static constexpr std::chrono::seconds MAX_UPLOAD_TIMEFRAME{60 * 60 * 24};

// A random time period (0 to 1 seconds) is added to feeler connections to prevent synchronization.
static constexpr auto FEELER_SLEEP_WINDOW{1s};

/** Frequency to attempt extra connections to reachable networks we're not connected to yet **/
static constexpr auto EXTRA_NETWORK_PEER_INTERVAL{5min};

/** Used to pass flags to the Bind() function */
enum BindFlags {
    BF_NONE         = 0,
    BF_REPORT_ERROR = (1U << 0),
    /**
     * Do not call AddLocal() for our special addresses, e.g., for incoming
     * Tor connections, to prevent gossiping them over the network.
     */
    BF_DONT_ADVERTISE = (1U << 1),
};

// The set of sockets cannot be modified while waiting
// The sleep time needs to be small to avoid new sockets stalling
static const uint64_t SELECT_TIMEOUT_MILLISECONDS = 50;

// Bucket name under which bytes of unrecognized (or corrupt) message types
// are accounted; see CNode::ReceiveMsgBytes().
const std::string NET_MESSAGE_TYPE_OTHER = "*other*";

// Salts for the deterministic, per-purpose randomizers (see
// GetDeterministicRandomizer() usage below).
static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("netgroup")[0:8]
static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // SHA256("localhostnonce")[0:8]
static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256("addrcache")[0:8]
//
// Global state variables
//
// Whether we try to learn our own addresses (gates AddLocal()/IsPeerAddrLocalGood()).
bool fDiscover = true;
// Whether we accept incoming connections; GetLocal() advertises nothing when false.
bool fListen = true;
GlobalMutex g_maplocalhost_mutex;
// Known local addresses with their score/port, keyed by address.
std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex);
std::string strSubVersion;
 120  
 121  size_t CSerializedNetMsg::GetMemoryUsage() const noexcept
 122  {
 123      // Don't count the dynamic memory used for the m_type string, by assuming it fits in the
 124      // "small string" optimization area (which stores data inside the object itself, up to some
 125      // size; 15 bytes in modern libstdc++).
 126      return sizeof(*this) + memusage::DynamicUsage(data);
 127  }
 128  
 129  void CConnman::AddAddrFetch(const std::string& strDest)
 130  {
 131      LOCK(m_addr_fetches_mutex);
 132      m_addr_fetches.push_back(strDest);
 133  }
 134  
 135  uint16_t GetListenPort()
 136  {
 137      // If -bind= is provided with ":port" part, use that (first one if multiple are provided).
 138      for (const std::string& bind_arg : gArgs.GetArgs("-bind")) {
 139          constexpr uint16_t dummy_port = 0;
 140  
 141          const std::optional<CService> bind_addr{Lookup(bind_arg, dummy_port, /*fAllowLookup=*/false)};
 142          if (bind_addr.has_value() && bind_addr->GetPort() != dummy_port) return bind_addr->GetPort();
 143      }
 144  
 145      // Otherwise, if -whitebind= without NetPermissionFlags::NoBan is provided, use that
 146      // (-whitebind= is required to have ":port").
 147      for (const std::string& whitebind_arg : gArgs.GetArgs("-whitebind")) {
 148          NetWhitebindPermissions whitebind;
 149          bilingual_str error;
 150          if (NetWhitebindPermissions::TryParse(whitebind_arg, whitebind, error)) {
 151              if (!NetPermissions::HasFlag(whitebind.m_flags, NetPermissionFlags::NoBan)) {
 152                  return whitebind.m_service.GetPort();
 153              }
 154          }
 155      }
 156  
 157      // Otherwise, if -port= is provided, use that. Otherwise use the default port.
 158      return static_cast<uint16_t>(gArgs.GetIntArg("-port", Params().GetDefaultPort()));
 159  }
 160  
 161  // Determine the "best" local address for a particular peer.
 162  [[nodiscard]] static std::optional<CService> GetLocal(const CNode& peer)
 163  {
 164      if (!fListen) return std::nullopt;
 165  
 166      std::optional<CService> addr;
 167      int nBestScore = -1;
 168      int nBestReachability = -1;
 169      {
 170          LOCK(g_maplocalhost_mutex);
 171          for (const auto& [local_addr, local_service_info] : mapLocalHost) {
 172              // For privacy reasons, don't advertise our privacy-network address
 173              // to other networks and don't advertise our other-network address
 174              // to privacy networks.
 175              if (local_addr.GetNetwork() != peer.ConnectedThroughNetwork()
 176                  && (local_addr.IsPrivacyNet() || peer.IsConnectedThroughPrivacyNet())) {
 177                  continue;
 178              }
 179              const int nScore{local_service_info.nScore};
 180              const int nReachability{local_addr.GetReachabilityFrom(peer.addr)};
 181              if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) {
 182                  addr.emplace(CService{local_addr, local_service_info.nPort});
 183                  nBestReachability = nReachability;
 184                  nBestScore = nScore;
 185              }
 186          }
 187      }
 188      return addr;
 189  }
 190  
 191  //! Convert the serialized seeds into usable address objects.
 192  static std::vector<CAddress> ConvertSeeds(const std::vector<uint8_t> &vSeedsIn)
 193  {
 194      // It'll only connect to one or two seed nodes because once it connects,
 195      // it'll get a pile of addresses with newer timestamps.
 196      // Seed nodes are given a random 'last seen time' of between one and two
 197      // weeks ago.
 198      const auto one_week{7 * 24h};
 199      std::vector<CAddress> vSeedsOut;
 200      FastRandomContext rng;
 201      DataStream underlying_stream{vSeedsIn};
 202      ParamsStream s{CAddress::V2_NETWORK, underlying_stream};
 203      while (!s.eof()) {
 204          CService endpoint;
 205          s >> endpoint;
 206          CAddress addr{endpoint, SeedsServiceFlags()};
 207          addr.nTime = rng.rand_uniform_delay(Now<NodeSeconds>() - one_week, -one_week);
 208          LogPrint(BCLog::NET, "Added hardcoded seed: %s\n", addr.ToStringAddrPort());
 209          vSeedsOut.push_back(addr);
 210      }
 211      return vSeedsOut;
 212  }
 213  
 214  // Determine the "best" local address for a particular peer.
 215  // If none, return the unroutable 0.0.0.0 but filled in with
 216  // the normal parameters, since the IP may be changed to a useful
 217  // one by discovery.
 218  CService GetLocalAddress(const CNode& peer)
 219  {
 220      return GetLocal(peer).value_or(CService{CNetAddr(), GetListenPort()});
 221  }
 222  
 223  static int GetnScore(const CService& addr)
 224  {
 225      LOCK(g_maplocalhost_mutex);
 226      const auto it = mapLocalHost.find(addr);
 227      return (it != mapLocalHost.end()) ? it->second.nScore : 0;
 228  }
 229  
 230  // Is our peer's addrLocal potentially useful as an external IP source?
 231  [[nodiscard]] static bool IsPeerAddrLocalGood(CNode *pnode)
 232  {
 233      CService addrLocal = pnode->GetAddrLocal();
 234      return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() &&
 235             g_reachable_nets.Contains(addrLocal);
 236  }
 237  
// Pick the local address to advertise to this peer, or nullopt if we have
// nothing routable to advertise.
std::optional<CService> GetLocalAddrForPeer(CNode& node)
{
    CService addrLocal{GetLocalAddress(node)};
    // If discovery is enabled, sometimes give our peer the address it
    // tells us that it sees us as in case it has a better idea of our
    // address than we do.
    FastRandomContext rng;
    // Echo the peer-observed address: always when ours is unroutable,
    // otherwise with probability 1/8 (randbits(3)==0) for addresses scored
    // above LOCAL_MANUAL, and 1/2 (randbits(1)==0) for the rest.
    if (IsPeerAddrLocalGood(&node) && (!addrLocal.IsRoutable() ||
         rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0))
    {
        if (node.IsInboundConn()) {
            // For inbound connections, assume both the address and the port
            // as seen from the peer.
            addrLocal = CService{node.GetAddrLocal()};
        } else {
            // For outbound connections, assume just the address as seen from
            // the peer and leave the port in `addrLocal` as returned by
            // `GetLocalAddress()` above. The peer has no way to observe our
            // listening port when we have initiated the connection.
            addrLocal.SetIP(node.GetAddrLocal());
        }
    }
    if (addrLocal.IsRoutable()) {
        LogPrint(BCLog::NET, "Advertising address %s to peer=%d\n", addrLocal.ToStringAddrPort(), node.GetId());
        return addrLocal;
    }
    // Address is unroutable. Don't advertise.
    return std::nullopt;
}
 267  
// learn a new local address; returns false if the address was rejected
// (unroutable, unreachable, or discovery disabled for a non-manual score)
bool AddLocal(const CService& addr_, int nScore)
{
    // Normalize: an IPv6 address in the CJDNS range may be re-typed as CJDNS.
    CService addr{MaybeFlipIPv6toCJDNS(addr_)};

    if (!addr.IsRoutable())
        return false;

    // Without -discover, only manually configured addresses are accepted.
    if (!fDiscover && nScore < LOCAL_MANUAL)
        return false;

    if (!g_reachable_nets.Contains(addr))
        return false;

    LogPrintf("AddLocal(%s,%i)\n", addr.ToStringAddrPort(), nScore);

    {
        LOCK(g_maplocalhost_mutex);
        const auto [it, is_newly_added] = mapLocalHost.emplace(addr, LocalServiceInfo());
        LocalServiceInfo &info = it->second;
        // Re-adding a known address with an equal-or-higher score bumps the
        // stored score by one, so repeated confirmations outrank a single
        // sighting; a lower score leaves the existing entry untouched.
        if (is_newly_added || nScore >= info.nScore) {
            info.nScore = nScore + (is_newly_added ? 0 : 1);
            info.nPort = addr.GetPort();
        }
    }

    return true;
}
 296  
 297  bool AddLocal(const CNetAddr &addr, int nScore)
 298  {
 299      return AddLocal(CService(addr, GetListenPort()), nScore);
 300  }
 301  
 302  void RemoveLocal(const CService& addr)
 303  {
 304      LOCK(g_maplocalhost_mutex);
 305      LogPrintf("RemoveLocal(%s)\n", addr.ToStringAddrPort());
 306      mapLocalHost.erase(addr);
 307  }
 308  
 309  /** vote for a local address */
 310  bool SeenLocal(const CService& addr)
 311  {
 312      LOCK(g_maplocalhost_mutex);
 313      const auto it = mapLocalHost.find(addr);
 314      if (it == mapLocalHost.end()) return false;
 315      ++it->second.nScore;
 316      return true;
 317  }
 318  
 319  
 320  /** check whether a given address is potentially local */
 321  bool IsLocal(const CService& addr)
 322  {
 323      LOCK(g_maplocalhost_mutex);
 324      return mapLocalHost.count(addr) > 0;
 325  }
 326  
 327  CNode* CConnman::FindNode(const CNetAddr& ip)
 328  {
 329      LOCK(m_nodes_mutex);
 330      for (CNode* pnode : m_nodes) {
 331        if (static_cast<CNetAddr>(pnode->addr) == ip) {
 332              return pnode;
 333          }
 334      }
 335      return nullptr;
 336  }
 337  
 338  CNode* CConnman::FindNode(const std::string& addrName)
 339  {
 340      LOCK(m_nodes_mutex);
 341      for (CNode* pnode : m_nodes) {
 342          if (pnode->m_addr_name == addrName) {
 343              return pnode;
 344          }
 345      }
 346      return nullptr;
 347  }
 348  
 349  CNode* CConnman::FindNode(const CService& addr)
 350  {
 351      LOCK(m_nodes_mutex);
 352      for (CNode* pnode : m_nodes) {
 353          if (static_cast<CService>(pnode->addr) == addr) {
 354              return pnode;
 355          }
 356      }
 357      return nullptr;
 358  }
 359  
 360  bool CConnman::AlreadyConnectedToAddress(const CAddress& addr)
 361  {
 362      return FindNode(static_cast<CNetAddr>(addr)) || FindNode(addr.ToStringAddrPort());
 363  }
 364  
 365  bool CConnman::CheckIncomingNonce(uint64_t nonce)
 366  {
 367      LOCK(m_nodes_mutex);
 368      for (const CNode* pnode : m_nodes) {
 369          if (!pnode->fSuccessfullyConnected && !pnode->IsInboundConn() && pnode->GetLocalNonce() == nonce)
 370              return false;
 371      }
 372      return true;
 373  }
 374  
/** Get the bind address for a socket as CAddress */
static CAddress GetBindAddress(const Sock& sock)
{
    CAddress addr_bind;
    struct sockaddr_storage sockaddr_bind;
    socklen_t sockaddr_bind_len = sizeof(sockaddr_bind);
    // A truthy return from GetSockName() indicates failure (matching
    // getsockname()'s nonzero-on-error convention), hence the negation:
    // the `if` branch is the success path.
    if (!sock.GetSockName((struct sockaddr*)&sockaddr_bind, &sockaddr_bind_len)) {
        addr_bind.SetSockAddr((const struct sockaddr*)&sockaddr_bind);
    } else {
        // On failure, return the default-constructed (invalid) address.
        LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "getsockname failed\n");
    }
    return addr_bind;
}
 388  
/**
 * Open a new outbound connection, either to an explicit address
 * (addrConnect) or to a host name (pszDest, resolved here), optionally
 * through a proxy or an I2P SAM session. Returns a new CNode with one
 * reference already added, or nullptr on failure.
 */
CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport)
{
    AssertLockNotHeld(m_unused_i2p_sessions_mutex);
    // Inbound connections are accepted elsewhere; this path only opens
    // outbound ones.
    assert(conn_type != ConnectionType::INBOUND);

    if (pszDest == nullptr) {
        // Never connect to one of our own local addresses.
        if (IsLocal(addrConnect))
            return nullptr;

        // Look for an existing connection
        CNode* pnode = FindNode(static_cast<CService>(addrConnect));
        if (pnode)
        {
            LogPrintf("Failed to open new connection, already connected\n");
            return nullptr;
        }
    }

    LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "trying %s connection %s lastseen=%.1fhrs\n",
        use_v2transport ? "v2" : "v1",
        pszDest ? pszDest : addrConnect.ToStringAddrPort(),
        Ticks<HoursDouble>(pszDest ? 0h : Now<NodeSeconds>() - addrConnect.nTime));

    // Resolve
    const uint16_t default_port{pszDest != nullptr ? GetDefaultPort(pszDest) :
                                                     m_params.GetDefaultPort()};
    if (pszDest) {
        std::vector<CService> resolved{Lookup(pszDest, default_port, fNameLookup && !HaveNameProxy(), 256)};
        if (!resolved.empty()) {
            Shuffle(resolved.begin(), resolved.end(), FastRandomContext());
            // If the connection is made by name, it can be the case that the name resolves to more than one address.
            // We don't want to connect any more of them if we are already connected to one
            for (const auto& r : resolved) {
                addrConnect = CAddress{MaybeFlipIPv6toCJDNS(r), NODE_NONE};
                if (!addrConnect.IsValid()) {
                    LogPrint(BCLog::NET, "Resolver returned invalid address %s for %s\n", addrConnect.ToStringAddrPort(), pszDest);
                    return nullptr;
                }
                // It is possible that we already have a connection to the IP/port pszDest resolved to.
                // In that case, drop the connection that was just created.
                LOCK(m_nodes_mutex);
                CNode* pnode = FindNode(static_cast<CService>(addrConnect));
                if (pnode) {
                    LogPrintf("Not opening a connection to %s, already connected to %s\n", pszDest, addrConnect.ToStringAddrPort());
                    return nullptr;
                }
            }
            // NOTE(review): after the loop, addrConnect holds the last
            // shuffled candidate, which is the address actually connected
            // to below.
        }
    }

    // Connect
    std::unique_ptr<Sock> sock;
    Proxy proxy;
    CAddress addr_bind;
    assert(!addr_bind.IsValid());
    std::unique_ptr<i2p::sam::Session> i2p_transient_session;

    if (addrConnect.IsValid()) {
        const bool use_proxy{GetProxy(addrConnect.GetNetwork(), proxy)};
        bool proxyConnectionFailed = false;

        if (addrConnect.IsI2P() && use_proxy) {
            i2p::Connection conn;
            bool connected{false};

            if (m_i2p_sam_session) {
                // Use the long-lived global SAM session when one exists.
                connected = m_i2p_sam_session->Connect(addrConnect, conn, proxyConnectionFailed);
            } else {
                // Otherwise take (or create) a transient session; on failure
                // it is returned to the bounded reuse pool below.
                {
                    LOCK(m_unused_i2p_sessions_mutex);
                    if (m_unused_i2p_sessions.empty()) {
                        i2p_transient_session =
                            std::make_unique<i2p::sam::Session>(proxy, &interruptNet);
                    } else {
                        i2p_transient_session.swap(m_unused_i2p_sessions.front());
                        m_unused_i2p_sessions.pop();
                    }
                }
                connected = i2p_transient_session->Connect(addrConnect, conn, proxyConnectionFailed);
                if (!connected) {
                    LOCK(m_unused_i2p_sessions_mutex);
                    if (m_unused_i2p_sessions.size() < MAX_UNUSED_I2P_SESSIONS_SIZE) {
                        m_unused_i2p_sessions.emplace(i2p_transient_session.release());
                    }
                }
            }

            if (connected) {
                sock = std::move(conn.sock);
                addr_bind = CAddress{conn.me, NODE_NONE};
            }
        } else if (use_proxy) {
            LogPrintLevel(BCLog::PROXY, BCLog::Level::Debug, "Using proxy: %s to connect to %s:%s\n", proxy.ToString(), addrConnect.ToStringAddr(), addrConnect.GetPort());
            sock = ConnectThroughProxy(proxy, addrConnect.ToStringAddr(), addrConnect.GetPort(), proxyConnectionFailed);
        } else {
            // no proxy needed (none set for target network)
            sock = ConnectDirectly(addrConnect, conn_type == ConnectionType::MANUAL);
        }
        if (!proxyConnectionFailed) {
            // If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to
            // the proxy, mark this as an attempt.
            addrman.Attempt(addrConnect, fCountFailure);
        }
    } else if (pszDest && GetNameProxy(proxy)) {
        // Unresolvable name (e.g. a .onion-style destination): let the name
        // proxy do the resolving.
        std::string host;
        uint16_t port{default_port};
        SplitHostPort(std::string(pszDest), port, host);
        bool proxyConnectionFailed;
        sock = ConnectThroughProxy(proxy, host, port, proxyConnectionFailed);
    }
    if (!sock) {
        return nullptr;
    }

    // Manual connections are the only outbound type that gets -whitelist
    // permission flags applied.
    NetPermissionFlags permission_flags = NetPermissionFlags::None;
    std::vector<NetWhitelistPermissions> whitelist_permissions = conn_type == ConnectionType::MANUAL ? vWhitelistedRangeOutgoing : std::vector<NetWhitelistPermissions>{};
    AddWhitelistPermissionFlags(permission_flags, addrConnect, whitelist_permissions);

    // Add node
    NodeId id = GetNewNodeId();
    // Per-node nonce derived deterministically from the node id; used for
    // self-connection detection (see CheckIncomingNonce()).
    uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
    if (!addr_bind.IsValid()) {
        addr_bind = GetBindAddress(*sock);
    }
    CNode* pnode = new CNode(id,
                             std::move(sock),
                             addrConnect,
                             CalculateKeyedNetGroup(addrConnect),
                             nonce,
                             addr_bind,
                             pszDest ? pszDest : "",
                             conn_type,
                             /*inbound_onion=*/false,
                             CNodeOptions{
                                 .permission_flags = permission_flags,
                                 .i2p_sam_session = std::move(i2p_transient_session),
                                 .recv_flood_size = nReceiveFloodSize,
                                 .use_v2transport = use_v2transport,
                             });
    pnode->AddRef();

    // We're making a new connection, harvest entropy from the time (and our peer count)
    RandAddEvent((uint32_t)id);

    return pnode;
}
 535  
void CNode::CloseSocketDisconnect()
{
    // Mark the node for disconnection before taking the socket lock so the
    // intent is visible regardless of socket state, then release the socket
    // and any per-connection I2P session.
    fDisconnect = true;
    LOCK(m_sock_mutex);
    if (m_sock) {
        LogPrint(BCLog::NET, "disconnecting peer=%d\n", id);
        m_sock.reset();
    }
    m_i2p_sam_session.reset();
}
 546  
 547  void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector<NetWhitelistPermissions>& ranges) const {
 548      for (const auto& subnet : ranges) {
 549          if (subnet.m_subnet.Match(addr)) {
 550              NetPermissions::AddFlag(flags, subnet.m_flags);
 551          }
 552      }
 553      if (NetPermissions::HasFlag(flags, NetPermissionFlags::Implicit)) {
 554          NetPermissions::ClearFlag(flags, NetPermissionFlags::Implicit);
 555          if (whitelist_forcerelay) NetPermissions::AddFlag(flags, NetPermissionFlags::ForceRelay);
 556          if (whitelist_relay) NetPermissions::AddFlag(flags, NetPermissionFlags::Relay);
 557          NetPermissions::AddFlag(flags, NetPermissionFlags::Mempool);
 558          NetPermissions::AddFlag(flags, NetPermissionFlags::NoBan);
 559      }
 560  }
 561  
// Return a copy of the peer-observed local address under its mutex; the
// caller must not already hold m_addr_local_mutex.
CService CNode::GetAddrLocal() const
{
    AssertLockNotHeld(m_addr_local_mutex);
    LOCK(m_addr_local_mutex);
    return addrLocal;
}
 568  
// Record the local address as seen by this peer. Write-once: a second
// attempt with a valid value already stored is logged and ignored.
void CNode::SetAddrLocal(const CService& addrLocalIn) {
    AssertLockNotHeld(m_addr_local_mutex);
    LOCK(m_addr_local_mutex);
    if (addrLocal.IsValid()) {
        LogError("Addr local already set for node: %i. Refusing to change from %s to %s\n", id, addrLocal.ToStringAddrPort(), addrLocalIn.ToStringAddrPort());
    } else {
        addrLocal = addrLocalIn;
    }
}
 578  
 579  Network CNode::ConnectedThroughNetwork() const
 580  {
 581      return m_inbound_onion ? NET_ONION : addr.GetNetClass();
 582  }
 583  
 584  bool CNode::IsConnectedThroughPrivacyNet() const
 585  {
 586      return m_inbound_onion || addr.IsPrivacyNet();
 587  }
 588  
#undef X
// Helper: copy the member with the same name into `stats`.
#define X(name) stats.name = name
// Snapshot this node's statistics into `stats`, taking each guarding lock
// only for as long as the members it protects are being copied.
void CNode::CopyStats(CNodeStats& stats)
{
    stats.nodeid = this->GetId();
    X(addr);
    X(addrBind);
    stats.m_network = ConnectedThroughNetwork();
    X(m_last_send);
    X(m_last_recv);
    X(m_last_tx_time);
    X(m_last_block_time);
    X(m_connected);
    X(nTimeOffset);
    X(m_addr_name);
    X(nVersion);
    {
        LOCK(m_subver_mutex);
        X(cleanSubVer);
    }
    stats.fInbound = IsInboundConn();
    X(m_bip152_highbandwidth_to);
    X(m_bip152_highbandwidth_from);
    {
        LOCK(cs_vSend);
        X(mapSendBytesPerMsgType);
        X(nSendBytes);
    }
    {
        LOCK(cs_vRecv);
        X(mapRecvBytesPerMsgType);
        X(nRecvBytes);
        // Transport info is read under cs_vRecv together with the
        // receive-side counters; session_id is only set when the transport
        // provides one (V1 does not — see V1Transport::GetInfo()).
        Transport::Info info = m_transport->GetInfo();
        stats.m_transport_type = info.transport_type;
        if (info.session_id) stats.m_session_id = HexStr(*info.session_id);
    }
    X(m_permission_flags);

    X(m_last_ping_time);
    X(m_min_ping_time);

    // Leave string empty if addrLocal invalid (not filled in yet)
    CService addrLocalUnlocked = GetAddrLocal();
    stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToStringAddrPort() : "";

    X(m_conn_type);
}
#undef X
 637  
// Feed raw network bytes into the transport, collecting any completed
// messages into vRecvMsg. Returns false on an unrecoverable transport
// error (caller should disconnect); sets `complete` to true if at least
// one full message was produced.
bool CNode::ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete)
{
    complete = false;
    const auto time = GetTime<std::chrono::microseconds>();
    LOCK(cs_vRecv);
    m_last_recv = std::chrono::duration_cast<std::chrono::seconds>(time);
    nRecvBytes += msg_bytes.size();
    // NOTE(review): this loop relies on ReceivedBytes() consuming from
    // msg_bytes (taking the span by reference); otherwise it would not
    // terminate — confirm against the Transport interface.
    while (msg_bytes.size() > 0) {
        // absorb network data
        if (!m_transport->ReceivedBytes(msg_bytes)) {
            // Serious transport problem, disconnect from the peer.
            return false;
        }

        if (m_transport->ReceivedMessageComplete()) {
            // decompose a transport agnostic CNetMessage from the deserializer
            bool reject_message{false};
            CNetMessage msg = m_transport->GetReceivedMessage(time, reject_message);
            if (reject_message) {
                // Message deserialization failed. Drop the message but don't disconnect the peer.
                // store the size of the corrupt message
                mapRecvBytesPerMsgType.at(NET_MESSAGE_TYPE_OTHER) += msg.m_raw_message_size;
                continue;
            }

            // Store received bytes per message type.
            // To prevent a memory DOS, only allow known message types.
            auto i = mapRecvBytesPerMsgType.find(msg.m_type);
            if (i == mapRecvBytesPerMsgType.end()) {
                i = mapRecvBytesPerMsgType.find(NET_MESSAGE_TYPE_OTHER);
            }
            assert(i != mapRecvBytesPerMsgType.end());
            i->second += msg.m_raw_message_size;

            // push the message to the process queue,
            vRecvMsg.push_back(std::move(msg));

            complete = true;
        }
    }

    return true;
}
 681  
V1Transport::V1Transport(const NodeId node_id) noexcept
    : m_magic_bytes{Params().MessageStart()}, m_node_id{node_id}
{
    // Reset() touches receive-side state guarded by m_recv_mutex, so take
    // the lock even though no other thread can reference this object yet.
    LOCK(m_recv_mutex);
    Reset();
}
 688  
 689  Transport::Info V1Transport::GetInfo() const noexcept
 690  {
 691      return {.transport_type = TransportProtocolType::V1, .session_id = {}};
 692  }
 693  
// Accumulate bytes into hdrbuf until a full v1 header is present, then
// deserialize and validate it (network magic, size limits). Returns the
// number of bytes consumed from msg_bytes, or -1 on an invalid header.
int V1Transport::readHeader(Span<const uint8_t> msg_bytes)
{
    AssertLockHeld(m_recv_mutex);
    // copy data to temporary parsing buffer
    unsigned int nRemaining = CMessageHeader::HEADER_SIZE - nHdrPos;
    unsigned int nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size());

    memcpy(&hdrbuf[nHdrPos], msg_bytes.data(), nCopy);
    nHdrPos += nCopy;

    // if header incomplete, exit
    if (nHdrPos < CMessageHeader::HEADER_SIZE)
        return nCopy;

    // deserialize to CMessageHeader
    try {
        hdrbuf >> hdr;
    }
    catch (const std::exception&) {
        LogPrint(BCLog::NET, "Header error: Unable to deserialize, peer=%d\n", m_node_id);
        return -1;
    }

    // Check start string, network magic
    if (hdr.pchMessageStart != m_magic_bytes) {
        LogPrint(BCLog::NET, "Header error: Wrong MessageStart %s received, peer=%d\n", HexStr(hdr.pchMessageStart), m_node_id);
        return -1;
    }

    // reject messages larger than MAX_SIZE or MAX_PROTOCOL_MESSAGE_LENGTH
    if (hdr.nMessageSize > MAX_SIZE || hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) {
        LogPrint(BCLog::NET, "Header error: Size too large (%s, %u bytes), peer=%d\n", SanitizeString(hdr.GetCommand()), hdr.nMessageSize, m_node_id);
        return -1;
    }

    // switch state to reading message data
    in_data = true;

    return nCopy;
}
 734  
// Append payload bytes into vRecv until hdr.nMessageSize bytes have been
// received, hashing them as they stream in. Returns bytes consumed.
int V1Transport::readData(Span<const uint8_t> msg_bytes)
{
    AssertLockHeld(m_recv_mutex);
    unsigned int nRemaining = hdr.nMessageSize - nDataPos;
    unsigned int nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size());

    if (vRecv.size() < nDataPos + nCopy) {
        // Allocate up to 256 KiB ahead, but never more than the total message size.
        vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024));
    }

    // Feed the running hasher incrementally; the digest is later compared
    // against the header checksum in GetReceivedMessage().
    hasher.Write(msg_bytes.first(nCopy));
    memcpy(&vRecv[nDataPos], msg_bytes.data(), nCopy);
    nDataPos += nCopy;

    return nCopy;
}
 752  
// Return the hash of the completed message payload. Finalized lazily on
// first call and cached in data_hash for subsequent calls.
const uint256& V1Transport::GetMessageHash() const
{
    AssertLockHeld(m_recv_mutex);
    // Only valid once the full payload has been received.
    assert(CompleteInternal());
    if (data_hash.IsNull())
        hasher.Finalize(data_hash);
    return data_hash;
}
 761  
// Extract the completed message: move the payload out of vRecv, verify the
// header checksum and message-type string, and reset the deserializer for
// the next message. On verification failure the message is still returned,
// with `reject_message` set so the caller can drop it.
CNetMessage V1Transport::GetReceivedMessage(const std::chrono::microseconds time, bool& reject_message)
{
    AssertLockNotHeld(m_recv_mutex);
    // Initialize out parameter
    reject_message = false;
    // decompose a single CNetMessage from the TransportDeserializer
    LOCK(m_recv_mutex);
    CNetMessage msg(std::move(vRecv));

    // store message type string, time, and sizes
    msg.m_type = hdr.GetCommand();
    msg.m_time = time;
    msg.m_message_size = hdr.nMessageSize;
    msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE;

    uint256 hash = GetMessageHash();

    // We just received a message off the wire, harvest entropy from the time (and the message checksum)
    RandAddEvent(ReadLE32(hash.begin()));

    // Check checksum and header message type string
    if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) {
        LogPrint(BCLog::NET, "Header error: Wrong checksum (%s, %u bytes), expected %s was %s, peer=%d\n",
                 SanitizeString(msg.m_type), msg.m_message_size,
                 HexStr(Span{hash}.first(CMessageHeader::CHECKSUM_SIZE)),
                 HexStr(hdr.pchChecksum),
                 m_node_id);
        reject_message = true;
    } else if (!hdr.IsCommandValid()) {
        LogPrint(BCLog::NET, "Header error: Invalid message type (%s, %u bytes), peer=%d\n",
                 SanitizeString(hdr.GetCommand()), msg.m_message_size, m_node_id);
        reject_message = true;
    }

    // Always reset the network deserializer (prepare for the next message)
    Reset();
    return msg;
}
 800  
 801  bool V1Transport::SetMessageToSend(CSerializedNetMsg& msg) noexcept
 802  {
 803      AssertLockNotHeld(m_send_mutex);
 804      // Determine whether a new message can be set.
 805      LOCK(m_send_mutex);
 806      if (m_sending_header || m_bytes_sent < m_message_to_send.data.size()) return false;
 807  
 808      // create dbl-sha256 checksum
 809      uint256 hash = Hash(msg.data);
 810  
 811      // create header
 812      CMessageHeader hdr(m_magic_bytes, msg.m_type.c_str(), msg.data.size());
 813      memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
 814  
 815      // serialize header
 816      m_header_to_send.clear();
 817      VectorWriter{m_header_to_send, 0, hdr};
 818  
 819      // update state
 820      m_message_to_send = std::move(msg);
 821      m_sending_header = true;
 822      m_bytes_sent = 0;
 823      return true;
 824  }
 825  
 826  Transport::BytesToSend V1Transport::GetBytesToSend(bool have_next_message) const noexcept
 827  {
 828      AssertLockNotHeld(m_send_mutex);
 829      LOCK(m_send_mutex);
 830      if (m_sending_header) {
 831          return {Span{m_header_to_send}.subspan(m_bytes_sent),
 832                  // We have more to send after the header if the message has payload, or if there
 833                  // is a next message after that.
 834                  have_next_message || !m_message_to_send.data.empty(),
 835                  m_message_to_send.m_type
 836                 };
 837      } else {
 838          return {Span{m_message_to_send.data}.subspan(m_bytes_sent),
 839                  // We only have more to send after this message's payload if there is another
 840                  // message.
 841                  have_next_message,
 842                  m_message_to_send.m_type
 843                 };
 844      }
 845  }
 846  
 847  void V1Transport::MarkBytesSent(size_t bytes_sent) noexcept
 848  {
 849      AssertLockNotHeld(m_send_mutex);
 850      LOCK(m_send_mutex);
 851      m_bytes_sent += bytes_sent;
 852      if (m_sending_header && m_bytes_sent == m_header_to_send.size()) {
 853          // We're done sending a message's header. Switch to sending its data bytes.
 854          m_sending_header = false;
 855          m_bytes_sent = 0;
 856      } else if (!m_sending_header && m_bytes_sent == m_message_to_send.data.size()) {
 857          // We're done sending a message's data. Wipe the data vector to reduce memory consumption.
 858          ClearShrink(m_message_to_send.data);
 859          m_bytes_sent = 0;
 860      }
 861  }
 862  
// Dynamic memory attributable to the send side of this transport.
size_t V1Transport::GetSendMemoryUsage() const noexcept
{
    AssertLockNotHeld(m_send_mutex);
    LOCK(m_send_mutex);
    // Don't count sending-side fields besides m_message_to_send, as they're all small and bounded.
    return m_message_to_send.GetMemoryUsage();
}
 870  
namespace {

/** List of short messages as defined in BIP324, in order.
 *
 * Only message types that are actually implemented in this codebase need to be listed, as other
 * messages get ignored anyway - whether we know how to decode them or not.
 */
const std::array<std::string, 33> V2_MESSAGE_IDS = {
    "", // 12 bytes follow encoding the message type like in V1
    NetMsgType::ADDR,
    NetMsgType::BLOCK,
    NetMsgType::BLOCKTXN,
    NetMsgType::CMPCTBLOCK,
    NetMsgType::FEEFILTER,
    NetMsgType::FILTERADD,
    NetMsgType::FILTERCLEAR,
    NetMsgType::FILTERLOAD,
    NetMsgType::GETBLOCKS,
    NetMsgType::GETBLOCKTXN,
    NetMsgType::GETDATA,
    NetMsgType::GETHEADERS,
    NetMsgType::HEADERS,
    NetMsgType::INV,
    NetMsgType::MEMPOOL,
    NetMsgType::MERKLEBLOCK,
    NetMsgType::NOTFOUND,
    NetMsgType::PING,
    NetMsgType::PONG,
    NetMsgType::SENDCMPCT,
    NetMsgType::TX,
    NetMsgType::GETCFILTERS,
    NetMsgType::CFILTER,
    NetMsgType::GETCFHEADERS,
    NetMsgType::CFHEADERS,
    NetMsgType::GETCFCHECKPT,
    NetMsgType::CFCHECKPT,
    NetMsgType::ADDRV2,
    // Unimplemented message types that are assigned in BIP324:
    "",
    "",
    "",
    ""
};

/** Reverse mapping of V2_MESSAGE_IDS: message type string -> 1-byte short id. */
class V2MessageMap
{
    std::unordered_map<std::string, uint8_t> m_map;

public:
    V2MessageMap() noexcept
    {
        // Index 0 is intentionally skipped: it marks the long (12-byte)
        // message type encoding, not an actual message type.
        for (size_t i = 1; i < std::size(V2_MESSAGE_IDS); ++i) {
            m_map.emplace(V2_MESSAGE_IDS[i], i);
        }
    }

    // Return the short id for the given message type, or nullopt if it has none.
    std::optional<uint8_t> operator()(const std::string& message_name) const noexcept
    {
        auto it = m_map.find(message_name);
        if (it == m_map.end()) return std::nullopt;
        return it->second;
    }
};

const V2MessageMap V2_MESSAGE_MAP;

// Produce a uniformly random number (up to MAX_GARBAGE_LEN) of random bytes,
// used as the garbage portion of an outgoing BIP324 handshake.
std::vector<uint8_t> GenerateRandomGarbage() noexcept
{
    std::vector<uint8_t> ret;
    FastRandomContext rng;
    ret.resize(rng.randrange(V2Transport::MAX_GARBAGE_LEN + 1));
    rng.fillrand(MakeWritableByteSpan(ret));
    return ret;
}

} // namespace
 947  
 948  void V2Transport::StartSendingHandshake() noexcept
 949  {
 950      AssertLockHeld(m_send_mutex);
 951      Assume(m_send_state == SendState::AWAITING_KEY);
 952      Assume(m_send_buffer.empty());
 953      // Initialize the send buffer with ellswift pubkey + provided garbage.
 954      m_send_buffer.resize(EllSwiftPubKey::size() + m_send_garbage.size());
 955      std::copy(std::begin(m_cipher.GetOurPubKey()), std::end(m_cipher.GetOurPubKey()), MakeWritableByteSpan(m_send_buffer).begin());
 956      std::copy(m_send_garbage.begin(), m_send_garbage.end(), m_send_buffer.begin() + EllSwiftPubKey::size());
 957      // We cannot wipe m_send_garbage as it will still be used as AAD later in the handshake.
 958  }
 959  
// Construct a V2 transport from externally provided key material, entropy and
// garbage. The initiator starts in KEY/AWAITING_KEY and queues its handshake
// immediately; the responder starts in KEY_MAYBE_V1/MAYBE_V1 so incoming v1
// peers can still be detected and handled via m_v1_fallback.
V2Transport::V2Transport(NodeId nodeid, bool initiating, const CKey& key, Span<const std::byte> ent32, std::vector<uint8_t> garbage) noexcept
    : m_cipher{key, ent32}, m_initiating{initiating}, m_nodeid{nodeid},
      m_v1_fallback{nodeid},
      m_recv_state{initiating ? RecvState::KEY : RecvState::KEY_MAYBE_V1},
      m_send_garbage{std::move(garbage)},
      m_send_state{initiating ? SendState::AWAITING_KEY : SendState::MAYBE_V1}
{
    // Callers must not provide more garbage than the protocol maximum.
    Assume(m_send_garbage.size() <= MAX_GARBAGE_LEN);
    // Start sending immediately if we're the initiator of the connection.
    if (initiating) {
        LOCK(m_send_mutex);
        StartSendingHandshake();
    }
}
 974  
// Convenience constructor: delegate with freshly generated random key,
// entropy, and garbage.
V2Transport::V2Transport(NodeId nodeid, bool initiating) noexcept
    : V2Transport{nodeid, initiating, GenerateRandomKey(),
                  MakeByteSpan(GetRandHash()), GenerateRandomGarbage()} {}
 978  
// Change the receive state, asserting (via Assume) that the transition is one
// permitted by the BIP324 receive state machine.
void V2Transport::SetReceiveState(RecvState recv_state) noexcept
{
    AssertLockHeld(m_recv_mutex);
    // Enforce allowed state transitions.
    switch (m_recv_state) {
    case RecvState::KEY_MAYBE_V1:
        Assume(recv_state == RecvState::KEY || recv_state == RecvState::V1);
        break;
    case RecvState::KEY:
        Assume(recv_state == RecvState::GARB_GARBTERM);
        break;
    case RecvState::GARB_GARBTERM:
        Assume(recv_state == RecvState::VERSION);
        break;
    case RecvState::VERSION:
        Assume(recv_state == RecvState::APP);
        break;
    case RecvState::APP:
        Assume(recv_state == RecvState::APP_READY);
        break;
    case RecvState::APP_READY:
        // APP and APP_READY alternate as messages are received and retrieved.
        Assume(recv_state == RecvState::APP);
        break;
    case RecvState::V1:
        Assume(false); // V1 state cannot be left
        break;
    }
    // Change state.
    m_recv_state = recv_state;
}
1009  
// Change the send state, asserting (via Assume) that the transition is one
// permitted by the BIP324 send state machine.
void V2Transport::SetSendState(SendState send_state) noexcept
{
    AssertLockHeld(m_send_mutex);
    // Enforce allowed state transitions.
    switch (m_send_state) {
    case SendState::MAYBE_V1:
        Assume(send_state == SendState::V1 || send_state == SendState::AWAITING_KEY);
        break;
    case SendState::AWAITING_KEY:
        Assume(send_state == SendState::READY);
        break;
    case SendState::READY:
    case SendState::V1:
        Assume(false); // Final states
        break;
    }
    // Change state.
    m_send_state = send_state;
}
1029  
// Whether a complete application message is available for retrieval with
// GetReceivedMessage(). Delegates to the v1 transport after a v1 fallback.
bool V2Transport::ReceivedMessageComplete() const noexcept
{
    AssertLockNotHeld(m_recv_mutex);
    LOCK(m_recv_mutex);
    if (m_recv_state == RecvState::V1) return m_v1_fallback.ReceivedMessageComplete();

    return m_recv_state == RecvState::APP_READY;
}
1038  
// Handle bytes received in KEY_MAYBE_V1 state: decide whether the peer is
// speaking v1 or v2. On a v1 prefix match, replay the buffered bytes into
// m_v1_fallback and switch both directions to their V1 states; on a mismatch,
// switch to v2 key exchange and start sending our handshake.
void V2Transport::ProcessReceivedMaybeV1Bytes() noexcept
{
    AssertLockHeld(m_recv_mutex);
    AssertLockNotHeld(m_send_mutex);
    Assume(m_recv_state == RecvState::KEY_MAYBE_V1);
    // We still have to determine if this is a v1 or v2 connection. The bytes being received could
    // be the beginning of either a v1 packet (network magic + "version\x00\x00\x00\x00\x00"), or
    // of a v2 public key. BIP324 specifies that a mismatch with this 16-byte string should trigger
    // sending of the key.
    std::array<uint8_t, V1_PREFIX_LEN> v1_prefix = {0, 0, 0, 0, 'v', 'e', 'r', 's', 'i', 'o', 'n', 0, 0, 0, 0, 0};
    std::copy(std::begin(Params().MessageStart()), std::end(Params().MessageStart()), v1_prefix.begin());
    Assume(m_recv_buffer.size() <= v1_prefix.size());
    if (!std::equal(m_recv_buffer.begin(), m_recv_buffer.end(), v1_prefix.begin())) {
        // Mismatch with v1 prefix, so we can assume a v2 connection.
        SetReceiveState(RecvState::KEY); // Convert to KEY state, leaving received bytes around.
        // Transition the sender to AWAITING_KEY state and start sending.
        LOCK(m_send_mutex);
        SetSendState(SendState::AWAITING_KEY);
        StartSendingHandshake();
    } else if (m_recv_buffer.size() == v1_prefix.size()) {
        // Full match with the v1 prefix, so fall back to v1 behavior.
        LOCK(m_send_mutex);
        Span<const uint8_t> feedback{m_recv_buffer};
        // Feed already received bytes to v1 transport. It should always accept these, because it's
        // less than the size of a v1 header, and these are the first bytes fed to m_v1_fallback.
        bool ret = m_v1_fallback.ReceivedBytes(feedback);
        Assume(feedback.empty());
        Assume(ret);
        SetReceiveState(RecvState::V1);
        SetSendState(SendState::V1);
        // Reset v2 transport buffers to save memory.
        ClearShrink(m_recv_buffer);
        ClearShrink(m_send_buffer);
    } else {
        // We have not received enough to distinguish v1 from v2 yet. Wait until more bytes come.
    }
}
1076  
// Handle bytes received in KEY state: accumulate the peer's 64-byte ellswift
// public key. Once complete, initialize the ciphers via Diffie-Hellman and
// queue our garbage terminator plus the encrypted version packet for sending.
// Returns false when the connection must be closed (wrong-network v1 peer).
bool V2Transport::ProcessReceivedKeyBytes() noexcept
{
    AssertLockHeld(m_recv_mutex);
    AssertLockNotHeld(m_send_mutex);
    Assume(m_recv_state == RecvState::KEY);
    Assume(m_recv_buffer.size() <= EllSwiftPubKey::size());

    // As a special exception, if bytes 4-16 of the key on a responder connection match the
    // corresponding bytes of a V1 version message, but bytes 0-4 don't match the network magic
    // (if they did, we'd have switched to V1 state already), assume this is a peer from
    // another network, and disconnect them. They will almost certainly disconnect us too when
    // they receive our uniformly random key and garbage, but detecting this case specially
    // means we can log it.
    static constexpr std::array<uint8_t, 12> MATCH = {'v', 'e', 'r', 's', 'i', 'o', 'n', 0, 0, 0, 0, 0};
    static constexpr size_t OFFSET = std::tuple_size_v<MessageStartChars>;
    if (!m_initiating && m_recv_buffer.size() >= OFFSET + MATCH.size()) {
        if (std::equal(MATCH.begin(), MATCH.end(), m_recv_buffer.begin() + OFFSET)) {
            LogPrint(BCLog::NET, "V2 transport error: V1 peer with wrong MessageStart %s\n",
                     HexStr(Span(m_recv_buffer).first(OFFSET)));
            return false;
        }
    }

    if (m_recv_buffer.size() == EllSwiftPubKey::size()) {
        // Other side's key has been fully received, and can now be Diffie-Hellman combined with
        // our key to initialize the encryption ciphers.

        // Initialize the ciphers.
        EllSwiftPubKey ellswift(MakeByteSpan(m_recv_buffer));
        LOCK(m_send_mutex);
        m_cipher.Initialize(ellswift, m_initiating);

        // Switch receiver state to GARB_GARBTERM.
        SetReceiveState(RecvState::GARB_GARBTERM);
        m_recv_buffer.clear();

        // Switch sender state to READY.
        SetSendState(SendState::READY);

        // Append the garbage terminator to the send buffer.
        m_send_buffer.resize(m_send_buffer.size() + BIP324Cipher::GARBAGE_TERMINATOR_LEN);
        std::copy(m_cipher.GetSendGarbageTerminator().begin(),
                  m_cipher.GetSendGarbageTerminator().end(),
                  MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::GARBAGE_TERMINATOR_LEN).begin());

        // Construct version packet in the send buffer, with the sent garbage data as AAD.
        m_send_buffer.resize(m_send_buffer.size() + BIP324Cipher::EXPANSION + VERSION_CONTENTS.size());
        m_cipher.Encrypt(
            /*contents=*/VERSION_CONTENTS,
            /*aad=*/MakeByteSpan(m_send_garbage),
            /*ignore=*/false,
            /*output=*/MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::EXPANSION + VERSION_CONTENTS.size()));
        // We no longer need the garbage.
        ClearShrink(m_send_garbage);
    } else {
        // We still have to receive more key bytes.
    }
    return true;
}
1136  
// Handle bytes received in GARB_GARBTERM state: scan for the 16-byte garbage
// terminator at the end of the buffer. On a match, the preceding garbage is
// kept to authenticate later as AAD. Returns false (disconnect) when the
// maximum garbage length is exceeded without finding the terminator.
bool V2Transport::ProcessReceivedGarbageBytes() noexcept
{
    AssertLockHeld(m_recv_mutex);
    Assume(m_recv_state == RecvState::GARB_GARBTERM);
    Assume(m_recv_buffer.size() <= MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN);
    if (m_recv_buffer.size() >= BIP324Cipher::GARBAGE_TERMINATOR_LEN) {
        if (MakeByteSpan(m_recv_buffer).last(BIP324Cipher::GARBAGE_TERMINATOR_LEN) == m_cipher.GetReceiveGarbageTerminator()) {
            // Garbage terminator received. Store garbage to authenticate it as AAD later.
            m_recv_aad = std::move(m_recv_buffer);
            m_recv_aad.resize(m_recv_aad.size() - BIP324Cipher::GARBAGE_TERMINATOR_LEN);
            m_recv_buffer.clear();
            SetReceiveState(RecvState::VERSION);
        } else if (m_recv_buffer.size() == MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN) {
            // We've reached the maximum length for garbage + garbage terminator, and the
            // terminator still does not match. Abort.
            LogPrint(BCLog::NET, "V2 transport error: missing garbage terminator, peer=%d\n", m_nodeid);
            return false;
        } else {
            // We still need to receive more garbage and/or garbage terminator bytes.
        }
    } else {
        // We have less than GARBAGE_TERMINATOR_LEN (16) bytes, so we certainly need to receive
        // more first.
    }
    return true;
}
1163  
// Handle bytes received in VERSION or APP state: first decrypt the 3-byte
// length descriptor, then (once the full ciphertext has arrived) decrypt the
// packet contents into m_recv_decode_buffer. Decoy packets are silently
// dropped. Returns false (disconnect) on an oversized length or a failed
// decryption/authentication.
bool V2Transport::ProcessReceivedPacketBytes() noexcept
{
    AssertLockHeld(m_recv_mutex);
    Assume(m_recv_state == RecvState::VERSION || m_recv_state == RecvState::APP);

    // The maximum permitted contents length for a packet, consisting of:
    // - 0x00 byte: indicating long message type encoding
    // - 12 bytes of message type
    // - payload
    static constexpr size_t MAX_CONTENTS_LEN =
        1 + CMessageHeader::COMMAND_SIZE +
        std::min<size_t>(MAX_SIZE, MAX_PROTOCOL_MESSAGE_LENGTH);

    if (m_recv_buffer.size() == BIP324Cipher::LENGTH_LEN) {
        // Length descriptor received.
        m_recv_len = m_cipher.DecryptLength(MakeByteSpan(m_recv_buffer));
        if (m_recv_len > MAX_CONTENTS_LEN) {
            LogPrint(BCLog::NET, "V2 transport error: packet too large (%u bytes), peer=%d\n", m_recv_len, m_nodeid);
            return false;
        }
    } else if (m_recv_buffer.size() > BIP324Cipher::LENGTH_LEN && m_recv_buffer.size() == m_recv_len + BIP324Cipher::EXPANSION) {
        // Ciphertext received, decrypt it into m_recv_decode_buffer.
        // Note that it is impossible to reach this branch without hitting the branch above first,
        // as GetMaxBytesToProcess only allows up to LENGTH_LEN into the buffer before that point.
        m_recv_decode_buffer.resize(m_recv_len);
        bool ignore{false};
        bool ret = m_cipher.Decrypt(
            /*input=*/MakeByteSpan(m_recv_buffer).subspan(BIP324Cipher::LENGTH_LEN),
            /*aad=*/MakeByteSpan(m_recv_aad),
            /*ignore=*/ignore,
            /*contents=*/MakeWritableByteSpan(m_recv_decode_buffer));
        if (!ret) {
            LogPrint(BCLog::NET, "V2 transport error: packet decryption failure (%u bytes), peer=%d\n", m_recv_len, m_nodeid);
            return false;
        }
        // We have decrypted a valid packet with the AAD we expected, so clear the expected AAD.
        ClearShrink(m_recv_aad);
        // Feed the last 4 bytes of the Poly1305 authentication tag (and its timing) into our RNG.
        RandAddEvent(ReadLE32(m_recv_buffer.data() + m_recv_buffer.size() - 4));

        // At this point we have a valid packet decrypted into m_recv_decode_buffer. If it's not a
        // decoy, which we simply ignore, use the current state to decide what to do with it.
        if (!ignore) {
            switch (m_recv_state) {
            case RecvState::VERSION:
                // Version message received; transition to application phase. The contents is
                // ignored, but can be used for future extensions.
                SetReceiveState(RecvState::APP);
                break;
            case RecvState::APP:
                // Application message decrypted correctly. It can be extracted using GetMessage().
                SetReceiveState(RecvState::APP_READY);
                break;
            default:
                // Any other state is invalid (this function should not have been called).
                Assume(false);
            }
        }
        // Wipe the receive buffer where the next packet will be received into.
        ClearShrink(m_recv_buffer);
        // In all but APP_READY state, we can wipe the decoded contents.
        if (m_recv_state != RecvState::APP_READY) ClearShrink(m_recv_decode_buffer);
    } else {
        // We either have less than 3 bytes, so we don't know the packet's length yet, or more
        // than 3 bytes but less than the packet's full ciphertext. Wait until those arrive.
    }
    return true;
}
1232  
// Upper bound on how many more bytes may be appended to m_recv_buffer before
// the current receive step can be processed; depends on the receive state.
size_t V2Transport::GetMaxBytesToProcess() noexcept
{
    AssertLockHeld(m_recv_mutex);
    switch (m_recv_state) {
    case RecvState::KEY_MAYBE_V1:
        // During the KEY_MAYBE_V1 state we do not allow more than the length of v1 prefix into the
        // receive buffer.
        Assume(m_recv_buffer.size() <= V1_PREFIX_LEN);
        // As long as we're not sure if this is a v1 or v2 connection, don't receive more than what
        // is strictly necessary to distinguish the two (16 bytes). If we permitted more than
        // the v1 header size (24 bytes), we may not be able to feed the already-received bytes
        // back into the m_v1_fallback V1 transport.
        return V1_PREFIX_LEN - m_recv_buffer.size();
    case RecvState::KEY:
        // During the KEY state, we only allow the 64-byte key into the receive buffer.
        Assume(m_recv_buffer.size() <= EllSwiftPubKey::size());
        // As long as we have not received the other side's public key, don't receive more than
        // that (64 bytes), as garbage follows, and locating the garbage terminator requires the
        // key exchange first.
        return EllSwiftPubKey::size() - m_recv_buffer.size();
    case RecvState::GARB_GARBTERM:
        // Process garbage bytes one by one (because terminator may appear anywhere).
        return 1;
    case RecvState::VERSION:
    case RecvState::APP:
        // Both states involve decoding a packet. Process the length descriptor first,
        // so that we know where the current packet ends (and we don't process bytes from the next
        // packet or decoy yet). Then, process the ciphertext bytes of the current packet.
        if (m_recv_buffer.size() < BIP324Cipher::LENGTH_LEN) {
            return BIP324Cipher::LENGTH_LEN - m_recv_buffer.size();
        } else {
            // Note that BIP324Cipher::EXPANSION is the total difference between contents size
            // and encoded packet size, which includes the 3 bytes due to the packet length.
            // When transitioning from receiving the packet length to receiving its ciphertext,
            // the encrypted packet length is left in the receive buffer.
            return BIP324Cipher::EXPANSION + m_recv_len - m_recv_buffer.size();
        }
    case RecvState::APP_READY:
        // No bytes can be processed until GetMessage() is called.
        return 0;
    case RecvState::V1:
        // Not allowed (must be dealt with by the caller).
        Assume(false);
        return 0;
    }
    Assume(false); // unreachable
    return 0;
}
1281  
// Feed received wire bytes into the transport. Consumes from msg_bytes and
// advances the receive state machine; returns false when the connection must
// be closed (a handshake or decryption failure in one of the substeps).
bool V2Transport::ReceivedBytes(Span<const uint8_t>& msg_bytes) noexcept
{
    AssertLockNotHeld(m_recv_mutex);
    /** How many bytes to allocate in the receive buffer at most above what is received so far. */
    static constexpr size_t MAX_RESERVE_AHEAD = 256 * 1024;

    LOCK(m_recv_mutex);
    if (m_recv_state == RecvState::V1) return m_v1_fallback.ReceivedBytes(msg_bytes);

    // Process the provided bytes in msg_bytes in a loop. In each iteration a nonzero number of
    // bytes (decided by GetMaxBytesToProcess) are taken from the beginning of msg_bytes, and
    // appended to m_recv_buffer. Then, depending on the receiver state, one of the
    // ProcessReceived*Bytes functions is called to process the bytes in that buffer.
    while (!msg_bytes.empty()) {
        // Decide how many bytes to copy from msg_bytes to m_recv_buffer.
        size_t max_read = GetMaxBytesToProcess();

        // Reserve space in the buffer if there is not enough.
        if (m_recv_buffer.size() + std::min(msg_bytes.size(), max_read) > m_recv_buffer.capacity()) {
            switch (m_recv_state) {
            case RecvState::KEY_MAYBE_V1:
            case RecvState::KEY:
            case RecvState::GARB_GARBTERM:
                // During the initial states (key/garbage), allocate once to fit the maximum (4111
                // bytes).
                m_recv_buffer.reserve(MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN);
                break;
            case RecvState::VERSION:
            case RecvState::APP: {
                // During states where a packet is being received, as much as is expected but never
                // more than MAX_RESERVE_AHEAD bytes in addition to what is received so far.
                // This means attackers that want to cause us to waste allocated memory are limited
                // to MAX_RESERVE_AHEAD above the largest allowed message contents size, and to
                // MAX_RESERVE_AHEAD more than they've actually sent us.
                size_t alloc_add = std::min(max_read, msg_bytes.size() + MAX_RESERVE_AHEAD);
                m_recv_buffer.reserve(m_recv_buffer.size() + alloc_add);
                break;
            }
            case RecvState::APP_READY:
                // The buffer is empty in this state.
                Assume(m_recv_buffer.empty());
                break;
            case RecvState::V1:
                // Should have bailed out above.
                Assume(false);
                break;
            }
        }

        // Can't read more than provided input.
        max_read = std::min(msg_bytes.size(), max_read);
        // Copy data to buffer.
        m_recv_buffer.insert(m_recv_buffer.end(), UCharCast(msg_bytes.data()), UCharCast(msg_bytes.data() + max_read));
        msg_bytes = msg_bytes.subspan(max_read);

        // Process data in the buffer.
        switch (m_recv_state) {
        case RecvState::KEY_MAYBE_V1:
            ProcessReceivedMaybeV1Bytes();
            if (m_recv_state == RecvState::V1) return true;
            break;

        case RecvState::KEY:
            if (!ProcessReceivedKeyBytes()) return false;
            break;

        case RecvState::GARB_GARBTERM:
            if (!ProcessReceivedGarbageBytes()) return false;
            break;

        case RecvState::VERSION:
        case RecvState::APP:
            if (!ProcessReceivedPacketBytes()) return false;
            break;

        case RecvState::APP_READY:
            return true;

        case RecvState::V1:
            // We should have bailed out before.
            Assume(false);
            break;
        }
        // Make sure we have made progress before continuing.
        Assume(max_read > 0);
    }

    return true;
}
1371  
1372  std::optional<std::string> V2Transport::GetMessageType(Span<const uint8_t>& contents) noexcept
1373  {
1374      if (contents.size() == 0) return std::nullopt; // Empty contents
1375      uint8_t first_byte = contents[0];
1376      contents = contents.subspan(1); // Strip first byte.
1377  
1378      if (first_byte != 0) {
1379          // Short (1 byte) encoding.
1380          if (first_byte < std::size(V2_MESSAGE_IDS)) {
1381              // Valid short message id.
1382              return V2_MESSAGE_IDS[first_byte];
1383          } else {
1384              // Unknown short message id.
1385              return std::nullopt;
1386          }
1387      }
1388  
1389      if (contents.size() < CMessageHeader::COMMAND_SIZE) {
1390          return std::nullopt; // Long encoding needs 12 message type bytes.
1391      }
1392  
1393      size_t msg_type_len{0};
1394      while (msg_type_len < CMessageHeader::COMMAND_SIZE && contents[msg_type_len] != 0) {
1395          // Verify that message type bytes before the first 0x00 are in range.
1396          if (contents[msg_type_len] < ' ' || contents[msg_type_len] > 0x7F) {
1397              return {};
1398          }
1399          ++msg_type_len;
1400      }
1401      std::string ret{reinterpret_cast<const char*>(contents.data()), msg_type_len};
1402      while (msg_type_len < CMessageHeader::COMMAND_SIZE) {
1403          // Verify that message type bytes after the first 0x00 are also 0x00.
1404          if (contents[msg_type_len] != 0) return {};
1405          ++msg_type_len;
1406      }
1407      // Strip message type bytes of contents.
1408      contents = contents.subspan(CMessageHeader::COMMAND_SIZE);
1409      return ret;
1410  }
1411  
// Extract the decrypted application message (available in APP_READY state) as
// a CNetMessage. reject_message is set when the message type cannot be
// decoded. Afterwards the receiver returns to APP state for the next packet.
CNetMessage V2Transport::GetReceivedMessage(std::chrono::microseconds time, bool& reject_message) noexcept
{
    AssertLockNotHeld(m_recv_mutex);
    LOCK(m_recv_mutex);
    if (m_recv_state == RecvState::V1) return m_v1_fallback.GetReceivedMessage(time, reject_message);

    Assume(m_recv_state == RecvState::APP_READY);
    Span<const uint8_t> contents{m_recv_decode_buffer};
    // GetMessageType strips the type encoding, leaving only the payload in contents.
    auto msg_type = GetMessageType(contents);
    CNetMessage msg{DataStream{}};
    // Note that BIP324Cipher::EXPANSION also includes the length descriptor size.
    msg.m_raw_message_size = m_recv_decode_buffer.size() + BIP324Cipher::EXPANSION;
    if (msg_type) {
        reject_message = false;
        msg.m_type = std::move(*msg_type);
        msg.m_time = time;
        msg.m_message_size = contents.size();
        msg.m_recv.resize(contents.size());
        std::copy(contents.begin(), contents.end(), UCharCast(msg.m_recv.data()));
    } else {
        LogPrint(BCLog::NET, "V2 transport error: invalid message type (%u bytes contents), peer=%d\n", m_recv_decode_buffer.size(), m_nodeid);
        reject_message = true;
    }
    ClearShrink(m_recv_decode_buffer);
    SetReceiveState(RecvState::APP);

    return msg;
}
1440  
// Queue a message for sending: encode its type (short id or 0x00 + 12-byte
// name) plus payload, and encrypt the result into the send buffer. Returns
// false when the transport cannot accept a new message yet.
bool V2Transport::SetMessageToSend(CSerializedNetMsg& msg) noexcept
{
    AssertLockNotHeld(m_send_mutex);
    LOCK(m_send_mutex);
    if (m_send_state == SendState::V1) return m_v1_fallback.SetMessageToSend(msg);
    // We only allow adding a new message to be sent when in the READY state (so the packet cipher
    // is available) and the send buffer is empty. This limits the number of messages in the send
    // buffer to just one, and leaves the responsibility for queueing them up to the caller.
    if (!(m_send_state == SendState::READY && m_send_buffer.empty())) return false;
    // Construct contents (encoding message type + payload).
    std::vector<uint8_t> contents;
    auto short_message_id = V2_MESSAGE_MAP(msg.m_type);
    if (short_message_id) {
        // Short encoding: single id byte followed by the payload.
        contents.resize(1 + msg.data.size());
        contents[0] = *short_message_id;
        std::copy(msg.data.begin(), msg.data.end(), contents.begin() + 1);
    } else {
        // Initialize with zeroes, and then write the message type string starting at offset 1.
        // This means contents[0] and the unused positions in contents[1..13] remain 0x00.
        contents.resize(1 + CMessageHeader::COMMAND_SIZE + msg.data.size(), 0);
        std::copy(msg.m_type.begin(), msg.m_type.end(), contents.data() + 1);
        std::copy(msg.data.begin(), msg.data.end(), contents.begin() + 1 + CMessageHeader::COMMAND_SIZE);
    }
    // Construct ciphertext in send buffer.
    m_send_buffer.resize(contents.size() + BIP324Cipher::EXPANSION);
    m_cipher.Encrypt(MakeByteSpan(contents), {}, false, MakeWritableByteSpan(m_send_buffer));
    m_send_type = msg.m_type;
    // Release memory
    ClearShrink(msg.data);
    return true;
}
1472  
1473  Transport::BytesToSend V2Transport::GetBytesToSend(bool have_next_message) const noexcept
1474  {
1475      AssertLockNotHeld(m_send_mutex);
1476      LOCK(m_send_mutex);
1477      if (m_send_state == SendState::V1) return m_v1_fallback.GetBytesToSend(have_next_message);
1478  
1479      if (m_send_state == SendState::MAYBE_V1) Assume(m_send_buffer.empty());
1480      Assume(m_send_pos <= m_send_buffer.size());
1481      return {
1482          Span{m_send_buffer}.subspan(m_send_pos),
1483          // We only have more to send after the current m_send_buffer if there is a (next)
1484          // message to be sent, and we're capable of sending packets. */
1485          have_next_message && m_send_state == SendState::READY,
1486          m_send_type
1487      };
1488  }
1489  
1490  void V2Transport::MarkBytesSent(size_t bytes_sent) noexcept
1491  {
1492      AssertLockNotHeld(m_send_mutex);
1493      LOCK(m_send_mutex);
1494      if (m_send_state == SendState::V1) return m_v1_fallback.MarkBytesSent(bytes_sent);
1495  
1496      if (m_send_state == SendState::AWAITING_KEY && m_send_pos == 0 && bytes_sent > 0) {
1497          LogPrint(BCLog::NET, "start sending v2 handshake to peer=%d\n", m_nodeid);
1498      }
1499  
1500      m_send_pos += bytes_sent;
1501      Assume(m_send_pos <= m_send_buffer.size());
1502      if (m_send_pos >= CMessageHeader::HEADER_SIZE) {
1503          m_sent_v1_header_worth = true;
1504      }
1505      // Wipe the buffer when everything is sent.
1506      if (m_send_pos == m_send_buffer.size()) {
1507          m_send_pos = 0;
1508          ClearShrink(m_send_buffer);
1509      }
1510  }
1511  
1512  bool V2Transport::ShouldReconnectV1() const noexcept
1513  {
1514      AssertLockNotHeld(m_send_mutex);
1515      AssertLockNotHeld(m_recv_mutex);
1516      // Only outgoing connections need reconnection.
1517      if (!m_initiating) return false;
1518  
1519      LOCK(m_recv_mutex);
1520      // We only reconnect in the very first state and when the receive buffer is empty. Together
1521      // these conditions imply nothing has been received so far.
1522      if (m_recv_state != RecvState::KEY) return false;
1523      if (!m_recv_buffer.empty()) return false;
1524      // Check if we've sent enough for the other side to disconnect us (if it was V1).
1525      LOCK(m_send_mutex);
1526      return m_sent_v1_header_worth;
1527  }
1528  
1529  size_t V2Transport::GetSendMemoryUsage() const noexcept
1530  {
1531      AssertLockNotHeld(m_send_mutex);
1532      LOCK(m_send_mutex);
1533      if (m_send_state == SendState::V1) return m_v1_fallback.GetSendMemoryUsage();
1534  
1535      return sizeof(m_send_buffer) + memusage::DynamicUsage(m_send_buffer);
1536  }
1537  
1538  Transport::Info V2Transport::GetInfo() const noexcept
1539  {
1540      AssertLockNotHeld(m_recv_mutex);
1541      LOCK(m_recv_mutex);
1542      if (m_recv_state == RecvState::V1) return m_v1_fallback.GetInfo();
1543  
1544      Transport::Info info;
1545  
1546      // Do not report v2 and session ID until the version packet has been received
1547      // and verified (confirming that the other side very likely has the same keys as us).
1548      if (m_recv_state != RecvState::KEY_MAYBE_V1 && m_recv_state != RecvState::KEY &&
1549          m_recv_state != RecvState::GARB_GARBTERM && m_recv_state != RecvState::VERSION) {
1550          info.transport_type = TransportProtocolType::V2;
1551          info.session_id = uint256(MakeUCharSpan(m_cipher.GetSessionID()));
1552      } else {
1553          info.transport_type = TransportProtocolType::DETECTING;
1554      }
1555  
1556      return info;
1557  }
1558  
// Push queued messages through the transport and onto the socket. Returns the number of
// bytes sent and whether unsent data remains afterwards. Caller must hold node.cs_vSend.
std::pair<size_t, bool> CConnman::SocketSendData(CNode& node) const
{
    auto it = node.vSendMsg.begin();
    size_t nSentSize = 0;
    bool data_left{false}; //!< second return value (whether unsent data remains)
    std::optional<bool> expected_more;

    while (true) {
        if (it != node.vSendMsg.end()) {
            // If possible, move one message from the send queue to the transport. This fails when
            // there is an existing message still being sent, or (for v2 transports) when the
            // handshake has not yet completed.
            size_t memusage = it->GetMemoryUsage();
            if (node.m_transport->SetMessageToSend(*it)) {
                // Update memory usage of send buffer (as *it will be deleted).
                node.m_send_memusage -= memusage;
                ++it;
            }
        }
        const auto& [data, more, msg_type] = node.m_transport->GetBytesToSend(it != node.vSendMsg.end());
        // We rely on the 'more' value returned by GetBytesToSend to correctly predict whether more
        // bytes are still to be sent, to correctly set the MSG_MORE flag. As a sanity check,
        // verify that the previously returned 'more' was correct.
        if (expected_more.has_value()) Assume(!data.empty() == *expected_more);
        expected_more = more;
        data_left = !data.empty(); // will be overwritten on next loop if all of data gets sent
        int nBytes = 0;
        if (!data.empty()) {
            LOCK(node.m_sock_mutex);
            // There is no socket in case we've already disconnected, or in test cases without
            // real connections. In these cases, we bail out immediately and just leave things
            // in the send queue and transport.
            if (!node.m_sock) {
                break;
            }
            // Non-blocking send; MSG_MORE (where available) hints to the kernel that more data
            // is coming, so it can coalesce into fewer TCP segments.
            int flags = MSG_NOSIGNAL | MSG_DONTWAIT;
#ifdef MSG_MORE
            if (more) {
                flags |= MSG_MORE;
            }
#endif
            nBytes = node.m_sock->Send(reinterpret_cast<const char*>(data.data()), data.size(), flags);
        }
        if (nBytes > 0) {
            node.m_last_send = GetTime<std::chrono::seconds>();
            node.nSendBytes += nBytes;
            // Notify transport that bytes have been processed.
            node.m_transport->MarkBytesSent(nBytes);
            // Update statistics per message type.
            if (!msg_type.empty()) { // don't report v2 handshake bytes for now
                node.AccountForSentBytes(msg_type, nBytes);
            }
            nSentSize += nBytes;
            if ((size_t)nBytes != data.size()) {
                // could not send full message; stop sending more
                break;
            }
        } else {
            if (nBytes < 0) {
                // error
                // Transient conditions (would-block etc.) are not fatal; anything else
                // triggers disconnection.
                int nErr = WSAGetLastError();
                if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
                    LogPrint(BCLog::NET, "socket send error for peer=%d: %s\n", node.GetId(), NetworkErrorString(nErr));
                    node.CloseSocketDisconnect();
                }
            }
            break;
        }
    }

    // Pause further sends when the queued plus in-transport memory exceeds the configured cap.
    node.fPauseSend = node.m_send_memusage + node.m_transport->GetSendMemoryUsage() > nSendBufferMaxSize;

    // If the whole queue was handed to the transport, its accounted memory must be zero.
    if (it == node.vSendMsg.end()) {
        assert(node.m_send_memusage == 0);
    }
    // Drop the messages that were successfully handed to the transport.
    node.vSendMsg.erase(node.vSendMsg.begin(), it);
    return {nSentSize, data_left};
}
1637  
1638  /** Try to find a connection to evict when the node is full.
1639   *  Extreme care must be taken to avoid opening the node to attacker
1640   *   triggered network partitioning.
1641   *  The strategy used here is to protect a small number of peers
1642   *   for each of several distinct characteristics which are difficult
1643   *   to forge.  In order to partition a node the attacker must be
1644   *   simultaneously better at all of them than honest peers.
1645   */
1646  bool CConnman::AttemptToEvictConnection()
1647  {
1648      std::vector<NodeEvictionCandidate> vEvictionCandidates;
1649      {
1650  
1651          LOCK(m_nodes_mutex);
1652          for (const CNode* node : m_nodes) {
1653              if (node->fDisconnect)
1654                  continue;
1655              NodeEvictionCandidate candidate{
1656                  .id = node->GetId(),
1657                  .m_connected = node->m_connected,
1658                  .m_min_ping_time = node->m_min_ping_time,
1659                  .m_last_block_time = node->m_last_block_time,
1660                  .m_last_tx_time = node->m_last_tx_time,
1661                  .fRelevantServices = node->m_has_all_wanted_services,
1662                  .m_relay_txs = node->m_relays_txs.load(),
1663                  .fBloomFilter = node->m_bloom_filter_loaded.load(),
1664                  .nKeyedNetGroup = node->nKeyedNetGroup,
1665                  .prefer_evict = node->m_prefer_evict,
1666                  .m_is_local = node->addr.IsLocal(),
1667                  .m_network = node->ConnectedThroughNetwork(),
1668                  .m_noban = node->HasPermission(NetPermissionFlags::NoBan),
1669                  .m_conn_type = node->m_conn_type,
1670              };
1671              vEvictionCandidates.push_back(candidate);
1672          }
1673      }
1674      const std::optional<NodeId> node_id_to_evict = SelectNodeToEvict(std::move(vEvictionCandidates));
1675      if (!node_id_to_evict) {
1676          return false;
1677      }
1678      LOCK(m_nodes_mutex);
1679      for (CNode* pnode : m_nodes) {
1680          if (pnode->GetId() == *node_id_to_evict) {
1681              LogPrint(BCLog::NET, "selected %s connection for eviction peer=%d; disconnecting\n", pnode->ConnectionTypeAsString(), pnode->GetId());
1682              pnode->fDisconnect = true;
1683              return true;
1684          }
1685      }
1686      return false;
1687  }
1688  
1689  void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
1690      struct sockaddr_storage sockaddr;
1691      socklen_t len = sizeof(sockaddr);
1692      auto sock = hListenSocket.sock->Accept((struct sockaddr*)&sockaddr, &len);
1693      CAddress addr;
1694  
1695      if (!sock) {
1696          const int nErr = WSAGetLastError();
1697          if (nErr != WSAEWOULDBLOCK) {
1698              LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr));
1699          }
1700          return;
1701      }
1702  
1703      if (!addr.SetSockAddr((const struct sockaddr*)&sockaddr)) {
1704          LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "Unknown socket family\n");
1705      } else {
1706          addr = CAddress{MaybeFlipIPv6toCJDNS(addr), NODE_NONE};
1707      }
1708  
1709      const CAddress addr_bind{MaybeFlipIPv6toCJDNS(GetBindAddress(*sock)), NODE_NONE};
1710  
1711      NetPermissionFlags permission_flags = NetPermissionFlags::None;
1712      hListenSocket.AddSocketPermissionFlags(permission_flags);
1713  
1714      CreateNodeFromAcceptedSocket(std::move(sock), permission_flags, addr_bind, addr);
1715  }
1716  
// Wrap an accepted socket in a CNode and register it, after applying the inbound
// admission policy (network active, bans, discouragement, slot limits/eviction).
void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
                                            NetPermissionFlags permission_flags,
                                            const CAddress& addr_bind,
                                            const CAddress& addr)
{
    int nInbound = 0;

    // Extend the listen-socket permissions with any address-based whitelist entries.
    AddWhitelistPermissionFlags(permission_flags, addr, vWhitelistedRangeIncoming);

    // Count current inbound peers; used below for the discouragement and full-slots checks.
    {
        LOCK(m_nodes_mutex);
        for (const CNode* pnode : m_nodes) {
            if (pnode->IsInboundConn()) nInbound++;
        }
    }

    if (!fNetworkActive) {
        LogPrint(BCLog::NET, "connection from %s dropped: not accepting new connections\n", addr.ToStringAddrPort());
        return;
    }

    if (!sock->IsSelectable()) {
        LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToStringAddrPort());
        return;
    }

    // According to the internet TCP_NODELAY is not carried into accepted sockets
    // on all platforms.  Set it again here just to be sure.
    const int on{1};
    if (sock->SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == SOCKET_ERROR) {
        LogPrint(BCLog::NET, "connection from %s: unable to set TCP_NODELAY, continuing anyway\n",
                 addr.ToStringAddrPort());
    }

    // Don't accept connections from banned peers.
    bool banned = m_banman && m_banman->IsBanned(addr);
    if (!NetPermissions::HasFlag(permission_flags, NetPermissionFlags::NoBan) && banned)
    {
        LogPrint(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToStringAddrPort());
        return;
    }

    // Only accept connections from discouraged peers if our inbound slots aren't (almost) full.
    bool discouraged = m_banman && m_banman->IsDiscouraged(addr);
    if (!NetPermissions::HasFlag(permission_flags, NetPermissionFlags::NoBan) && nInbound + 1 >= m_max_inbound && discouraged)
    {
        LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToStringAddrPort());
        return;
    }

    // All inbound slots taken: try to free one by evicting an existing peer.
    if (nInbound >= m_max_inbound)
    {
        if (!AttemptToEvictConnection()) {
            // No connection to evict, disconnect the new connection
            LogPrint(BCLog::NET, "failed to find an eviction candidate - connection dropped (full)\n");
            return;
        }
    }

    NodeId id = GetNewNodeId();
    // Deterministic per-node nonce; NOTE(review): presumably used to detect connections
    // to ourselves during the version handshake — confirm against the handshake code.
    uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();

    const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();
    // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is
    // detected, so use it whenever we signal NODE_P2P_V2.
    const bool use_v2transport(nLocalServices & NODE_P2P_V2);

    CNode* pnode = new CNode(id,
                             std::move(sock),
                             addr,
                             CalculateKeyedNetGroup(addr),
                             nonce,
                             addr_bind,
                             /*addrNameIn=*/"",
                             ConnectionType::INBOUND,
                             inbound_onion,
                             CNodeOptions{
                                 .permission_flags = permission_flags,
                                 .prefer_evict = discouraged,
                                 .recv_flood_size = nReceiveFloodSize,
                                 .use_v2transport = use_v2transport,
                             });
    pnode->AddRef();
    m_msgproc->InitializeNode(*pnode, nLocalServices);

    LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToStringAddrPort());

    {
        LOCK(m_nodes_mutex);
        m_nodes.push_back(pnode);
    }

    // We received a new connection, harvest entropy from the time (and our peer count)
    RandAddEvent((uint32_t)id);
}
1812  
1813  bool CConnman::AddConnection(const std::string& address, ConnectionType conn_type, bool use_v2transport = false)
1814  {
1815      AssertLockNotHeld(m_unused_i2p_sessions_mutex);
1816      std::optional<int> max_connections;
1817      switch (conn_type) {
1818      case ConnectionType::INBOUND:
1819      case ConnectionType::MANUAL:
1820          return false;
1821      case ConnectionType::OUTBOUND_FULL_RELAY:
1822          max_connections = m_max_outbound_full_relay;
1823          break;
1824      case ConnectionType::BLOCK_RELAY:
1825          max_connections = m_max_outbound_block_relay;
1826          break;
1827      // no limit for ADDR_FETCH because -seednode has no limit either
1828      case ConnectionType::ADDR_FETCH:
1829          break;
1830      // no limit for FEELER connections since they're short-lived
1831      case ConnectionType::FEELER:
1832          break;
1833      } // no default case, so the compiler can warn about missing cases
1834  
1835      // Count existing connections
1836      int existing_connections = WITH_LOCK(m_nodes_mutex,
1837                                           return std::count_if(m_nodes.begin(), m_nodes.end(), [conn_type](CNode* node) { return node->m_conn_type == conn_type; }););
1838  
1839      // Max connections of specified type already exist
1840      if (max_connections != std::nullopt && existing_connections >= max_connections) return false;
1841  
1842      // Max total outbound connections already exist
1843      CSemaphoreGrant grant(*semOutbound, true);
1844      if (!grant) return false;
1845  
1846      OpenNetworkConnection(CAddress(), false, std::move(grant), address.c_str(), conn_type, /*use_v2transport=*/use_v2transport);
1847      return true;
1848  }
1849  
// Remove peers flagged with fDisconnect from the active set, queue v1-retry
// reconnections where appropriate, and delete nodes whose references have all been released.
void CConnman::DisconnectNodes()
{
    AssertLockNotHeld(m_nodes_mutex);
    AssertLockNotHeld(m_reconnections_mutex);

    // Use a temporary variable to accumulate desired reconnections, so we don't need
    // m_reconnections_mutex while holding m_nodes_mutex.
    decltype(m_reconnections) reconnections_to_add;

    {
        LOCK(m_nodes_mutex);

        if (!fNetworkActive) {
            // Disconnect any connected nodes
            for (CNode* pnode : m_nodes) {
                if (!pnode->fDisconnect) {
                    LogPrint(BCLog::NET, "Network not active, dropping peer=%d\n", pnode->GetId());
                    pnode->fDisconnect = true;
                }
            }
        }

        // Disconnect unused nodes
        // Iterate over a copy, since the loop body erases from m_nodes.
        std::vector<CNode*> nodes_copy = m_nodes;
        for (CNode* pnode : nodes_copy)
        {
            if (pnode->fDisconnect)
            {
                // remove from m_nodes
                m_nodes.erase(remove(m_nodes.begin(), m_nodes.end(), pnode), m_nodes.end());

                // Add to reconnection list if appropriate. We don't reconnect right here, because
                // the creation of a connection is a blocking operation (up to several seconds),
                // and we don't want to hold up the socket handler thread for that long.
                if (pnode->m_transport->ShouldReconnectV1()) {
                    reconnections_to_add.push_back({
                        .addr_connect = pnode->addr,
                        .grant = std::move(pnode->grantOutbound),
                        .destination = pnode->m_dest,
                        .conn_type = pnode->m_conn_type,
                        .use_v2transport = false});
                    LogPrint(BCLog::NET, "retrying with v1 transport protocol for peer=%d\n", pnode->GetId());
                }

                // release outbound grant (if any)
                pnode->grantOutbound.Release();

                // close socket and cleanup
                pnode->CloseSocketDisconnect();

                // update connection count by network
                if (pnode->IsManualOrFullOutboundConn()) --m_network_conn_counts[pnode->addr.GetNetwork()];

                // hold in disconnected pool until all refs are released
                pnode->Release();
                m_nodes_disconnected.push_back(pnode);
            }
        }
    }
    {
        // Delete disconnected nodes
        // Iterate over a copy, since the loop body removes from m_nodes_disconnected.
        std::list<CNode*> nodes_disconnected_copy = m_nodes_disconnected;
        for (CNode* pnode : nodes_disconnected_copy)
        {
            // Destroy the object only after other threads have stopped using it.
            if (pnode->GetRefCount() <= 0) {
                m_nodes_disconnected.remove(pnode);
                DeleteNode(pnode);
            }
        }
    }
    {
        // Move entries from reconnections_to_add to m_reconnections.
        LOCK(m_reconnections_mutex);
        m_reconnections.splice(m_reconnections.end(), std::move(reconnections_to_add));
    }
}
1927  
1928  void CConnman::NotifyNumConnectionsChanged()
1929  {
1930      size_t nodes_size;
1931      {
1932          LOCK(m_nodes_mutex);
1933          nodes_size = m_nodes.size();
1934      }
1935      if(nodes_size != nPrevNodeCount) {
1936          nPrevNodeCount = nodes_size;
1937          if (m_client_interface) {
1938              m_client_interface->NotifyNumConnectionsChanged(nodes_size);
1939          }
1940      }
1941  }
1942  
1943  bool CConnman::ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const
1944  {
1945      return node.m_connected + m_peer_connect_timeout < now;
1946  }
1947  
// Returns true when the peer should be disconnected due to inactivity or a stalled handshake.
bool CConnman::InactivityCheck(const CNode& node) const
{
    // Tests that see disconnects after using mocktime can start nodes with a
    // large timeout. For example, -peertimeout=999999999.
    const auto now{GetTime<std::chrono::seconds>()};
    const auto last_send{node.m_last_send.load()};
    const auto last_recv{node.m_last_recv.load()};

    // Young connections are exempt from all of the checks below.
    if (!ShouldRunInactivityChecks(node, now)) return false;

    // No message was sent or received at all within the connect timeout.
    if (last_recv.count() == 0 || last_send.count() == 0) {
        LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d peer=%d\n", count_seconds(m_peer_connect_timeout), last_recv.count() != 0, last_send.count() != 0, node.GetId());
        return true;
    }

    // Nothing was sent for too long.
    if (now > last_send + TIMEOUT_INTERVAL) {
        LogPrint(BCLog::NET, "socket sending timeout: %is peer=%d\n", count_seconds(now - last_send), node.GetId());
        return true;
    }

    // Nothing was received for too long.
    if (now > last_recv + TIMEOUT_INTERVAL) {
        LogPrint(BCLog::NET, "socket receive timeout: %is peer=%d\n", count_seconds(now - last_recv), node.GetId());
        return true;
    }

    // The version handshake never completed within the timeout.
    if (!node.fSuccessfullyConnected) {
        LogPrint(BCLog::NET, "version handshake timeout peer=%d\n", node.GetId());
        return true;
    }

    return false;
}
1980  
1981  Sock::EventsPerSock CConnman::GenerateWaitSockets(Span<CNode* const> nodes)
1982  {
1983      Sock::EventsPerSock events_per_sock;
1984  
1985      for (const ListenSocket& hListenSocket : vhListenSocket) {
1986          events_per_sock.emplace(hListenSocket.sock, Sock::Events{Sock::RECV});
1987      }
1988  
1989      for (CNode* pnode : nodes) {
1990          bool select_recv = !pnode->fPauseRecv;
1991          bool select_send;
1992          {
1993              LOCK(pnode->cs_vSend);
1994              // Sending is possible if either there are bytes to send right now, or if there will be
1995              // once a potential message from vSendMsg is handed to the transport. GetBytesToSend
1996              // determines both of these in a single call.
1997              const auto& [to_send, more, _msg_type] = pnode->m_transport->GetBytesToSend(!pnode->vSendMsg.empty());
1998              select_send = !to_send.empty() || more;
1999          }
2000          if (!select_recv && !select_send) continue;
2001  
2002          LOCK(pnode->m_sock_mutex);
2003          if (pnode->m_sock) {
2004              Sock::Event event = (select_send ? Sock::SEND : 0) | (select_recv ? Sock::RECV : 0);
2005              events_per_sock.emplace(pnode->m_sock, Sock::Events{event});
2006          }
2007      }
2008  
2009      return events_per_sock;
2010  }
2011  
// One iteration of socket servicing: wait for readiness, then service connected
// peers and accept pending inbound connections.
void CConnman::SocketHandler()
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);

    Sock::EventsPerSock events_per_sock;

    {
        // Snapshot keeps the CNode pointers alive (via refcounts) for the duration
        // of this scope, so servicing below is safe even if peers get disconnected.
        const NodesSnapshot snap{*this, /*shuffle=*/false};

        const auto timeout = std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS);

        // Check for the readiness of the already connected sockets and the
        // listening sockets in one call ("readiness" as in poll(2) or
        // select(2)). If none are ready, wait for a short while and return
        // empty sets.
        events_per_sock = GenerateWaitSockets(snap.Nodes());
        if (events_per_sock.empty() || !events_per_sock.begin()->first->WaitMany(timeout, events_per_sock)) {
            interruptNet.sleep_for(timeout);
        }

        // Service (send/receive) each of the already connected nodes.
        SocketHandlerConnected(snap.Nodes(), events_per_sock);
    }

    // Accept new connections from listening sockets.
    SocketHandlerListening(events_per_sock);
}
2039  
// Service (send to / receive from) the given connected nodes, according to the
// readiness events reported for their sockets.
void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
                                      const Sock::EventsPerSock& events_per_sock)
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);

    for (CNode* pnode : nodes) {
        if (interruptNet)
            return;

        //
        // Receive
        //
        bool recvSet = false;
        bool sendSet = false;
        bool errorSet = false;
        {
            LOCK(pnode->m_sock_mutex);
            if (!pnode->m_sock) {
                continue;
            }
            // Look up which events (if any) occurred for this peer's socket.
            const auto it = events_per_sock.find(pnode->m_sock);
            if (it != events_per_sock.end()) {
                recvSet = it->second.occurred & Sock::RECV;
                sendSet = it->second.occurred & Sock::SEND;
                errorSet = it->second.occurred & Sock::ERR;
            }
        }

        if (sendSet) {
            // Send data
            auto [bytes_sent, data_left] = WITH_LOCK(pnode->cs_vSend, return SocketSendData(*pnode));
            if (bytes_sent) {
                RecordBytesSent(bytes_sent);

                // If both receiving and (non-optimistic) sending were possible, we first attempt
                // sending. If that succeeds, but does not fully drain the send queue, do not
                // attempt to receive. This avoids needlessly queueing data if the remote peer
                // is slow at receiving data, by means of TCP flow control. We only do this when
                // sending actually succeeded to make sure progress is always made; otherwise a
                // deadlock would be possible when both sides have data to send, but neither is
                // receiving.
                if (data_left) recvSet = false;
            }
        }

        // Receive on readability or error (an error condition is also surfaced via recv()).
        if (recvSet || errorSet)
        {
            // typical socket buffer is 8K-64K
            uint8_t pchBuf[0x10000];
            int nBytes = 0;
            {
                LOCK(pnode->m_sock_mutex);
                if (!pnode->m_sock) {
                    continue;
                }
                nBytes = pnode->m_sock->Recv(pchBuf, sizeof(pchBuf), MSG_DONTWAIT);
            }
            if (nBytes > 0)
            {
                bool notify = false;
                // Feed the received bytes to the transport; failure means the stream
                // is unusable and the peer gets disconnected.
                if (!pnode->ReceiveMsgBytes({pchBuf, (size_t)nBytes}, notify)) {
                    pnode->CloseSocketDisconnect();
                }
                RecordBytesRecv(nBytes);
                // Wake the message handler thread if complete messages are available.
                if (notify) {
                    pnode->MarkReceivedMsgsForProcessing();
                    WakeMessageHandler();
                }
            }
            else if (nBytes == 0)
            {
                // socket closed gracefully
                if (!pnode->fDisconnect) {
                    LogPrint(BCLog::NET, "socket closed for peer=%d\n", pnode->GetId());
                }
                pnode->CloseSocketDisconnect();
            }
            else if (nBytes < 0)
            {
                // error
                // Transient conditions (would-block etc.) are tolerated; anything else
                // triggers disconnection.
                int nErr = WSAGetLastError();
                if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
                {
                    if (!pnode->fDisconnect) {
                        LogPrint(BCLog::NET, "socket recv error for peer=%d: %s\n", pnode->GetId(), NetworkErrorString(nErr));
                    }
                    pnode->CloseSocketDisconnect();
                }
            }
        }

        // Flag peers that have been silent (or never completed the handshake) for too long.
        if (InactivityCheck(*pnode)) pnode->fDisconnect = true;
    }
}
2134  
2135  void CConnman::SocketHandlerListening(const Sock::EventsPerSock& events_per_sock)
2136  {
2137      for (const ListenSocket& listen_socket : vhListenSocket) {
2138          if (interruptNet) {
2139              return;
2140          }
2141          const auto it = events_per_sock.find(listen_socket.sock);
2142          if (it != events_per_sock.end() && it->second.occurred & Sock::RECV) {
2143              AcceptConnection(listen_socket);
2144          }
2145      }
2146  }
2147  
2148  void CConnman::ThreadSocketHandler()
2149  {
2150      AssertLockNotHeld(m_total_bytes_sent_mutex);
2151  
2152      while (!interruptNet)
2153      {
2154          DisconnectNodes();
2155          NotifyNumConnectionsChanged();
2156          SocketHandler();
2157      }
2158  }
2159  
2160  void CConnman::WakeMessageHandler()
2161  {
2162      {
2163          LOCK(mutexMsgProc);
2164          fMsgProcWake = true;
2165      }
2166      condMsgProc.notify_one();
2167  }
2168  
// Thread that queries the chain's DNS seeds for peer addresses, but only
// when the need for addresses appears acute (see the "goal" comment below).
// Exits early once enough full outbound connections are established.
void CConnman::ThreadDNSAddressSeed()
{
    FastRandomContext rng;
    // Randomize seed order so no single operator is systematically queried first.
    std::vector<std::string> seeds = m_params.DNSSeeds();
    Shuffle(seeds.begin(), seeds.end(), rng);
    int seeds_right_now = 0; // Number of seeds left before testing if we have enough connections
    int found = 0;

    if (gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED)) {
        // When -forcednsseed is provided, query all.
        seeds_right_now = seeds.size();
    } else if (addrman.Size() == 0) {
        // If we have no known peers, query all.
        // This will occur on the first run, or if peers.dat has been
        // deleted.
        seeds_right_now = seeds.size();
    }

    // goal: only query DNS seed if address need is acute
    // * If we have a reasonable number of peers in addrman, spend
    //   some time trying them first. This improves user privacy by
    //   creating fewer identifying DNS requests, reduces trust by
    //   giving seeds less influence on the network topology, and
    //   reduces traffic to the seeds.
    // * When querying DNS seeds query a few at once, this ensures
    //   that we don't give DNS seeds the ability to eclipse nodes
    //   that query them.
    // * If we continue having problems, eventually query all the
    //   DNS seeds, and if that fails too, also try the fixed seeds.
    //   (done in ThreadOpenConnections)
    const std::chrono::seconds seeds_wait_time = (addrman.Size() >= DNSSEEDS_DELAY_PEER_THRESHOLD ? DNSSEEDS_DELAY_MANY_PEERS : DNSSEEDS_DELAY_FEW_PEERS);

    for (const std::string& seed : seeds) {
        if (seeds_right_now == 0) {
            // Start a new batch of seed queries.
            seeds_right_now += DNSSEEDS_TO_QUERY_AT_ONCE;

            if (addrman.Size() > 0) {
                LogPrintf("Waiting %d seconds before querying DNS seeds.\n", seeds_wait_time.count());
                std::chrono::seconds to_wait = seeds_wait_time;
                while (to_wait.count() > 0) {
                    // if sleeping for the MANY_PEERS interval, wake up
                    // early to see if we have enough peers and can stop
                    // this thread entirely freeing up its resources
                    std::chrono::seconds w = std::min(DNSSEEDS_DELAY_FEW_PEERS, to_wait);
                    if (!interruptNet.sleep_for(w)) return;
                    to_wait -= w;

                    // Count connected-and-handshaked full outbound peers.
                    int nRelevant = 0;
                    {
                        LOCK(m_nodes_mutex);
                        for (const CNode* pnode : m_nodes) {
                            if (pnode->fSuccessfullyConnected && pnode->IsFullOutboundConn()) ++nRelevant;
                        }
                    }
                    if (nRelevant >= 2) {
                        if (found > 0) {
                            LogPrintf("%d addresses found from DNS seeds\n", found);
                            LogPrintf("P2P peers available. Finished DNS seeding.\n");
                        } else {
                            LogPrintf("P2P peers available. Skipped DNS seeding.\n");
                        }
                        return;
                    }
                }
            }
        }

        if (interruptNet) return;

        // hold off on querying seeds if P2P network deactivated
        if (!fNetworkActive) {
            LogPrintf("Waiting for network to be reactivated before querying DNS seeds.\n");
            do {
                if (!interruptNet.sleep_for(std::chrono::seconds{1})) return;
            } while (!fNetworkActive);
        }

        LogPrintf("Loading addresses from DNS seed %s\n", seed);
        // If -proxy is in use, we make an ADDR_FETCH connection to the DNS resolved peer address
        // for the base dns seed domain in chainparams
        if (HaveNameProxy()) {
            AddAddrFetch(seed);
        } else {
            std::vector<CAddress> vAdd;
            constexpr ServiceFlags requiredServiceBits{SeedsServiceFlags()};
            // Seeds encode required service bits in an "x<hex>." subdomain.
            std::string host = strprintf("x%x.%s", requiredServiceBits, seed);
            CNetAddr resolveSource;
            if (!resolveSource.SetInternal(host)) {
                continue;
            }
            unsigned int nMaxIPs = 256; // Limits number of IPs learned from a DNS seed
            const auto addresses{LookupHost(host, nMaxIPs, true)};
            if (!addresses.empty()) {
                for (const CNetAddr& ip : addresses) {
                    CAddress addr = CAddress(CService(ip, m_params.GetDefaultPort()), requiredServiceBits);
                    addr.nTime = rng.rand_uniform_delay(Now<NodeSeconds>() - 3 * 24h, -4 * 24h); // use a random age between 3 and 7 days old
                    vAdd.push_back(addr);
                    found++;
                }
                addrman.Add(vAdd, resolveSource);
            } else {
                // If the seed does not support a subdomain with our desired service bits,
                // we make an ADDR_FETCH connection to the DNS resolved peer address for the
                // base dns seed domain in chainparams
                AddAddrFetch(seed);
            }
        }
        --seeds_right_now;
    }
    LogPrintf("%d addresses found from DNS seeds\n", found);
}
2280  
2281  void CConnman::DumpAddresses()
2282  {
2283      const auto start{SteadyClock::now()};
2284  
2285      DumpPeerAddresses(::gArgs, addrman);
2286  
2287      LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat  %dms\n",
2288               addrman.Size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
2289  }
2290  
2291  void CConnman::ProcessAddrFetch()
2292  {
2293      AssertLockNotHeld(m_unused_i2p_sessions_mutex);
2294      std::string strDest;
2295      {
2296          LOCK(m_addr_fetches_mutex);
2297          if (m_addr_fetches.empty())
2298              return;
2299          strDest = m_addr_fetches.front();
2300          m_addr_fetches.pop_front();
2301      }
2302      // Attempt v2 connection if we support v2 - we'll reconnect with v1 if our
2303      // peer doesn't support it or immediately disconnects us for another reason.
2304      const bool use_v2transport(GetLocalServices() & NODE_P2P_V2);
2305      CAddress addr;
2306      CSemaphoreGrant grant(*semOutbound, /*fTry=*/true);
2307      if (grant) {
2308          OpenNetworkConnection(addr, false, std::move(grant), strDest.c_str(), ConnectionType::ADDR_FETCH, use_v2transport);
2309      }
2310  }
2311  
// Return whether an extra full-relay outbound connection attempt is
// currently enabled (flag is set via SetTryNewOutboundPeer(); per the
// comment in ThreadOpenConnections it gets set when a stale tip is detected).
bool CConnman::GetTryNewOutboundPeer() const
{
    return m_try_another_outbound_peer;
}
2316  
2317  void CConnman::SetTryNewOutboundPeer(bool flag)
2318  {
2319      m_try_another_outbound_peer = flag;
2320      LogPrint(BCLog::NET, "setting try another outbound peer=%s\n", flag ? "true" : "false");
2321  }
2322  
// Enable the periodic extra block-relay-only connections; the flag is
// consulted by ThreadOpenConnections (next_extra_block_relay timer branch).
void CConnman::StartExtraBlockRelayPeers()
{
    LogPrint(BCLog::NET, "enabling extra block-relay-only peers\n");
    m_start_extra_block_relay_peers = true;
}
2328  
2329  // Return the number of peers we have over our outbound connection limit
2330  // Exclude peers that are marked for disconnect, or are going to be
2331  // disconnected soon (eg ADDR_FETCH and FEELER)
2332  // Also exclude peers that haven't finished initial connection handshake yet
2333  // (so that we don't decide we're over our desired connection limit, and then
2334  // evict some peer that has finished the handshake)
2335  int CConnman::GetExtraFullOutboundCount() const
2336  {
2337      int full_outbound_peers = 0;
2338      {
2339          LOCK(m_nodes_mutex);
2340          for (const CNode* pnode : m_nodes) {
2341              if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsFullOutboundConn()) {
2342                  ++full_outbound_peers;
2343              }
2344          }
2345      }
2346      return std::max(full_outbound_peers - m_max_outbound_full_relay, 0);
2347  }
2348  
2349  int CConnman::GetExtraBlockRelayCount() const
2350  {
2351      int block_relay_peers = 0;
2352      {
2353          LOCK(m_nodes_mutex);
2354          for (const CNode* pnode : m_nodes) {
2355              if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsBlockOnlyConn()) {
2356                  ++block_relay_peers;
2357              }
2358          }
2359      }
2360      return std::max(block_relay_peers - m_max_outbound_block_relay, 0);
2361  }
2362  
2363  std::unordered_set<Network> CConnman::GetReachableEmptyNetworks() const
2364  {
2365      std::unordered_set<Network> networks{};
2366      for (int n = 0; n < NET_MAX; n++) {
2367          enum Network net = (enum Network)n;
2368          if (net == NET_UNROUTABLE || net == NET_INTERNAL) continue;
2369          if (g_reachable_nets.Contains(net) && addrman.Size(net, std::nullopt) == 0) {
2370              networks.insert(net);
2371          }
2372      }
2373      return networks;
2374  }
2375  
// Return whether we have more than one connection counted in
// m_network_conn_counts for the given network. Caller must hold
// m_nodes_mutex.
bool CConnman::MultipleManualOrFullOutboundConns(Network net) const
{
    AssertLockHeld(m_nodes_mutex);
    return m_network_conn_counts[net] > 1;
}
2381  
2382  bool CConnman::MaybePickPreferredNetwork(std::optional<Network>& network)
2383  {
2384      std::array<Network, 5> nets{NET_IPV4, NET_IPV6, NET_ONION, NET_I2P, NET_CJDNS};
2385      Shuffle(nets.begin(), nets.end(), FastRandomContext());
2386  
2387      LOCK(m_nodes_mutex);
2388      for (const auto net : nets) {
2389          if (g_reachable_nets.Contains(net) && m_network_conn_counts[net] == 0 && addrman.Size(net) != 0) {
2390              network = net;
2391              return true;
2392          }
2393      }
2394  
2395      return false;
2396  }
2397  
// Thread that opens automatic outbound connections (anchor, full-relay,
// block-relay-only, feeler and extra network-specific peers), falling back
// to fixed seeds when addrman stays empty. When `connect` is non-empty,
// loops over only those addresses (MANUAL connections) instead.
void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
{
    AssertLockNotHeld(m_unused_i2p_sessions_mutex);
    AssertLockNotHeld(m_reconnections_mutex);
    FastRandomContext rng;
    // Connect to specific addresses
    if (!connect.empty())
    {
        // Attempt v2 connection if we support v2 - we'll reconnect with v1 if our
        // peer doesn't support it or immediately disconnects us for another reason.
        const bool use_v2transport(GetLocalServices() & NODE_P2P_V2);
        for (int64_t nLoop = 0;; nLoop++)
        {
            for (const std::string& strAddr : connect)
            {
                CAddress addr(CService(), NODE_NONE);
                OpenNetworkConnection(addr, false, {}, strAddr.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/use_v2transport);
                // Back off more between attempts as the loop count grows
                // (capped at 10 half-second sleeps per address).
                for (int i = 0; i < 10 && i < nLoop; i++)
                {
                    if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
                        return;
                }
            }
            if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
                return;
            PerformReconnections();
        }
    }

    // Initiate network connections
    auto start = GetTime<std::chrono::microseconds>();

    // Minimum time before next feeler connection (in microseconds).
    auto next_feeler = GetExponentialRand(start, FEELER_INTERVAL);
    auto next_extra_block_relay = GetExponentialRand(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
    auto next_extra_network_peer{GetExponentialRand(start, EXTRA_NETWORK_PEER_INTERVAL)};
    const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED);
    bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS);
    const bool use_seednodes{gArgs.IsArgSet("-seednode")};

    if (!add_fixed_seeds) {
        LogPrintf("Fixed seeds are disabled\n");
    }

    while (!interruptNet)
    {
        ProcessAddrFetch();

        if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
            return;

        PerformReconnections();

        // Blocks until an outbound connection slot is available.
        CSemaphoreGrant grant(*semOutbound);
        if (interruptNet)
            return;

        const std::unordered_set<Network> fixed_seed_networks{GetReachableEmptyNetworks()};
        if (add_fixed_seeds && !fixed_seed_networks.empty()) {
            // When the node starts with an empty peers.dat, there are a few other sources of peers before
            // we fallback on to fixed seeds: -dnsseed, -seednode, -addnode
            // If none of those are available, we fallback on to fixed seeds immediately, else we allow
            // 60 seconds for any of those sources to populate addrman.
            bool add_fixed_seeds_now = false;
            // It is cheapest to check if enough time has passed first.
            if (GetTime<std::chrono::seconds>() > start + std::chrono::minutes{1}) {
                add_fixed_seeds_now = true;
                LogPrintf("Adding fixed seeds as 60 seconds have passed and addrman is empty for at least one reachable network\n");
            }

            // Perform cheap checks before locking a mutex.
            else if (!dnsseed && !use_seednodes) {
                LOCK(m_added_nodes_mutex);
                if (m_added_node_params.empty()) {
                    add_fixed_seeds_now = true;
                    LogPrintf("Adding fixed seeds as -dnsseed=0 (or IPv4/IPv6 connections are disabled via -onlynet) and neither -addnode nor -seednode are provided\n");
                }
            }

            if (add_fixed_seeds_now) {
                std::vector<CAddress> seed_addrs{ConvertSeeds(m_params.FixedSeeds())};
                // We will not make outgoing connections to peers that are unreachable
                // (e.g. because of -onlynet configuration).
                // Therefore, we do not add them to addrman in the first place.
                // In case previously unreachable networks become reachable
                // (e.g. in case of -onlynet changes by the user), fixed seeds will
                // be loaded only for networks for which we have no addresses.
                seed_addrs.erase(std::remove_if(seed_addrs.begin(), seed_addrs.end(),
                                                [&fixed_seed_networks](const CAddress& addr) { return fixed_seed_networks.count(addr.GetNetwork()) == 0; }),
                                 seed_addrs.end());
                CNetAddr local;
                local.SetInternal("fixedseeds");
                addrman.Add(seed_addrs, local);
                add_fixed_seeds = false;
                LogPrintf("Added %d fixed seeds from reachable networks.\n", seed_addrs.size());
            }
        }

        //
        // Choose an address to connect to based on most recently seen
        //
        CAddress addrConnect;

        // Only connect out to one peer per ipv4/ipv6 network group (/16 for IPv4).
        int nOutboundFullRelay = 0;
        int nOutboundBlockRelay = 0;
        int outbound_privacy_network_peers = 0;
        std::set<std::vector<unsigned char>> outbound_ipv46_peer_netgroups;

        {
            LOCK(m_nodes_mutex);
            for (const CNode* pnode : m_nodes) {
                if (pnode->IsFullOutboundConn()) nOutboundFullRelay++;
                if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++;

                // Make sure our persistent outbound slots to ipv4/ipv6 peers belong to different netgroups.
                switch (pnode->m_conn_type) {
                    // We currently don't take inbound connections into account. Since they are
                    // free to make, an attacker could make them to prevent us from connecting to
                    // certain peers.
                    case ConnectionType::INBOUND:
                    // Short-lived outbound connections should not affect how we select outbound
                    // peers from addrman.
                    case ConnectionType::ADDR_FETCH:
                    case ConnectionType::FEELER:
                        break;
                    case ConnectionType::MANUAL:
                    case ConnectionType::OUTBOUND_FULL_RELAY:
                    case ConnectionType::BLOCK_RELAY:
                        const CAddress address{pnode->addr};
                        if (address.IsTor() || address.IsI2P() || address.IsCJDNS()) {
                            // Since our addrman-groups for these networks are
                            // random, without relation to the route we
                            // take to connect to these peers or to the
                            // difficulty in obtaining addresses with diverse
                            // groups, we don't worry about diversity with
                            // respect to our addrman groups when connecting to
                            // these networks.
                            ++outbound_privacy_network_peers;
                        } else {
                            outbound_ipv46_peer_netgroups.insert(m_netgroupman.GetGroup(address));
                        }
                } // no default case, so the compiler can warn about missing cases
            }
        }

        ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
        auto now = GetTime<std::chrono::microseconds>();
        bool anchor = false;
        bool fFeeler = false;
        std::optional<Network> preferred_net;

        // Determine what type of connection to open. Opening
        // BLOCK_RELAY connections to addresses from anchors.dat gets the highest
        // priority. Then we open OUTBOUND_FULL_RELAY priority until we
        // meet our full-relay capacity. Then we open BLOCK_RELAY connection
        // until we hit our block-relay-only peer limit.
        // GetTryNewOutboundPeer() gets set when a stale tip is detected, so we
        // try opening an additional OUTBOUND_FULL_RELAY connection. If none of
        // these conditions are met, check to see if it's time to try an extra
        // block-relay-only peer (to confirm our tip is current, see below) or the next_feeler
        // timer to decide if we should open a FEELER.

        if (!m_anchors.empty() && (nOutboundBlockRelay < m_max_outbound_block_relay)) {
            conn_type = ConnectionType::BLOCK_RELAY;
            anchor = true;
        } else if (nOutboundFullRelay < m_max_outbound_full_relay) {
            // OUTBOUND_FULL_RELAY
        } else if (nOutboundBlockRelay < m_max_outbound_block_relay) {
            conn_type = ConnectionType::BLOCK_RELAY;
        } else if (GetTryNewOutboundPeer()) {
            // OUTBOUND_FULL_RELAY
        } else if (now > next_extra_block_relay && m_start_extra_block_relay_peers) {
            // Periodically connect to a peer (using regular outbound selection
            // methodology from addrman) and stay connected long enough to sync
            // headers, but not much else.
            //
            // Then disconnect the peer, if we haven't learned anything new.
            //
            // The idea is to make eclipse attacks very difficult to pull off,
            // because every few minutes we're finding a new peer to learn headers
            // from.
            //
            // This is similar to the logic for trying extra outbound (full-relay)
            // peers, except:
            // - we do this all the time on an exponential timer, rather than just when
            //   our tip is stale
            // - we potentially disconnect our next-youngest block-relay-only peer, if our
            //   newest block-relay-only peer delivers a block more recently.
            //   See the eviction logic in net_processing.cpp.
            //
            // Because we can promote these connections to block-relay-only
            // connections, they do not get their own ConnectionType enum
            // (similar to how we deal with extra outbound peers).
            next_extra_block_relay = GetExponentialRand(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
            conn_type = ConnectionType::BLOCK_RELAY;
        } else if (now > next_feeler) {
            next_feeler = GetExponentialRand(now, FEELER_INTERVAL);
            conn_type = ConnectionType::FEELER;
            fFeeler = true;
        } else if (nOutboundFullRelay == m_max_outbound_full_relay &&
                   m_max_outbound_full_relay == MAX_OUTBOUND_FULL_RELAY_CONNECTIONS &&
                   now > next_extra_network_peer &&
                   MaybePickPreferredNetwork(preferred_net)) {
            // Full outbound connection management: Attempt to get at least one
            // outbound peer from each reachable network by making extra connections
            // and then protecting "only" peers from a network during outbound eviction.
            // This is not attempted if the user changed -maxconnections to a value
            // so low that less than MAX_OUTBOUND_FULL_RELAY_CONNECTIONS are made,
            // to prevent interactions with otherwise protected outbound peers.
            next_extra_network_peer = GetExponentialRand(now, EXTRA_NETWORK_PEER_INTERVAL);
        } else {
            // skip to next iteration of while loop
            continue;
        }

        addrman.ResolveCollisions();

        const auto current_time{NodeClock::now()};
        int nTries = 0;
        // Candidate-selection loop: repeatedly draw addresses until one
        // passes all filters below, or we give up for this round.
        while (!interruptNet)
        {
            if (anchor && !m_anchors.empty()) {
                const CAddress addr = m_anchors.back();
                m_anchors.pop_back();
                if (!addr.IsValid() || IsLocal(addr) || !g_reachable_nets.Contains(addr) ||
                    !m_msgproc->HasAllDesirableServiceFlags(addr.nServices) ||
                    outbound_ipv46_peer_netgroups.count(m_netgroupman.GetGroup(addr))) continue;
                addrConnect = addr;
                LogPrint(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToStringAddrPort());
                break;
            }

            // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman,
            // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates
            // already-connected network ranges, ...) before trying new addrman addresses.
            nTries++;
            if (nTries > 100)
                break;

            CAddress addr;
            NodeSeconds addr_last_try{0s};

            if (fFeeler) {
                // First, try to get a tried table collision address. This returns
                // an empty (invalid) address if there are no collisions to try.
                std::tie(addr, addr_last_try) = addrman.SelectTriedCollision();

                if (!addr.IsValid()) {
                    // No tried table collisions. Select a new table address
                    // for our feeler.
                    std::tie(addr, addr_last_try) = addrman.Select(true);
                } else if (AlreadyConnectedToAddress(addr)) {
                    // If test-before-evict logic would have us connect to a
                    // peer that we're already connected to, just mark that
                    // address as Good(). We won't be able to initiate the
                    // connection anyway, so this avoids inadvertently evicting
                    // a currently-connected peer.
                    addrman.Good(addr);
                    // Select a new table address for our feeler instead.
                    std::tie(addr, addr_last_try) = addrman.Select(true);
                }
            } else {
                // Not a feeler
                // If preferred_net has a value set, pick an extra outbound
                // peer from that network. The eviction logic in net_processing
                // ensures that a peer from another network will be evicted.
                std::tie(addr, addr_last_try) = addrman.Select(false, preferred_net);
            }

            // Require outbound IPv4/IPv6 connections, other than feelers, to be to distinct network groups
            if (!fFeeler && outbound_ipv46_peer_netgroups.count(m_netgroupman.GetGroup(addr))) {
                continue;
            }

            // if we selected an invalid or local address, restart
            if (!addr.IsValid() || IsLocal(addr)) {
                break;
            }

            if (!g_reachable_nets.Contains(addr)) {
                continue;
            }

            // only consider very recently tried nodes after 30 failed attempts
            if (current_time - addr_last_try < 10min && nTries < 30) {
                continue;
            }

            // for non-feelers, require all the services we'll want,
            // for feelers, only require they be a full node (only because most
            // SPV clients don't have a good address DB available)
            if (!fFeeler && !m_msgproc->HasAllDesirableServiceFlags(addr.nServices)) {
                continue;
            } else if (fFeeler && !MayHaveUsefulAddressDB(addr.nServices)) {
                continue;
            }

            // Do not connect to bad ports, unless 50 invalid addresses have been selected already.
            if (nTries < 50 && (addr.IsIPv4() || addr.IsIPv6()) && IsBadPort(addr.GetPort())) {
                continue;
            }

            // Do not make automatic outbound connections to addnode peers, to
            // not use our limited outbound slots for them and to ensure
            // addnode connections benefit from their intended protections.
            if (AddedNodesContain(addr)) {
                LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Not making automatic %s%s connection to %s peer selected for manual (addnode) connection%s\n",
                              preferred_net.has_value() ? "network-specific " : "",
                              ConnectionTypeAsString(conn_type), GetNetworkName(addr.GetNetwork()),
                              fLogIPs ? strprintf(": %s", addr.ToStringAddrPort()) : "");
                continue;
            }

            // Candidate passed all filters; connect to it below.
            addrConnect = addr;
            break;
        }

        if (addrConnect.IsValid()) {
            if (fFeeler) {
                // Add small amount of random noise before connection to avoid synchronization.
                if (!interruptNet.sleep_for(rng.rand_uniform_duration<CThreadInterrupt::Clock>(FEELER_SLEEP_WINDOW))) {
                    return;
                }
                LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToStringAddrPort());
            }

            if (preferred_net != std::nullopt) LogPrint(BCLog::NET, "Making network specific connection to %s on %s.\n", addrConnect.ToStringAddrPort(), GetNetworkName(preferred_net.value()));

            // Record addrman failure attempts when node has at least 2 persistent outbound connections to peers with
            // different netgroups in ipv4/ipv6 networks + all peers in Tor/I2P/CJDNS networks.
            // Don't record addrman failure attempts when node is offline. This can be identified since all local
            // network connections (if any) belong in the same netgroup, and the size of `outbound_ipv46_peer_netgroups` would only be 1.
            const bool count_failures{((int)outbound_ipv46_peer_netgroups.size() + outbound_privacy_network_peers) >= std::min(m_max_automatic_connections - 1, 2)};
            // Use BIP324 transport when both us and them have NODE_V2_P2P set.
            const bool use_v2transport(addrConnect.nServices & GetLocalServices() & NODE_P2P_V2);
            OpenNetworkConnection(addrConnect, count_failures, std::move(grant), /*strDest=*/nullptr, conn_type, use_v2transport);
        }
    }
}
2738  
2739  std::vector<CAddress> CConnman::GetCurrentBlockRelayOnlyConns() const
2740  {
2741      std::vector<CAddress> ret;
2742      LOCK(m_nodes_mutex);
2743      for (const CNode* pnode : m_nodes) {
2744          if (pnode->IsBlockOnlyConn()) {
2745              ret.push_back(pnode->addr);
2746          }
2747      }
2748  
2749      return ret;
2750  }
2751  
2752  std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo(bool include_connected) const
2753  {
2754      std::vector<AddedNodeInfo> ret;
2755  
2756      std::list<AddedNodeParams> lAddresses(0);
2757      {
2758          LOCK(m_added_nodes_mutex);
2759          ret.reserve(m_added_node_params.size());
2760          std::copy(m_added_node_params.cbegin(), m_added_node_params.cend(), std::back_inserter(lAddresses));
2761      }
2762  
2763  
2764      // Build a map of all already connected addresses (by IP:port and by name) to inbound/outbound and resolved CService
2765      std::map<CService, bool> mapConnected;
2766      std::map<std::string, std::pair<bool, CService>> mapConnectedByName;
2767      {
2768          LOCK(m_nodes_mutex);
2769          for (const CNode* pnode : m_nodes) {
2770              if (pnode->addr.IsValid()) {
2771                  mapConnected[pnode->addr] = pnode->IsInboundConn();
2772              }
2773              std::string addrName{pnode->m_addr_name};
2774              if (!addrName.empty()) {
2775                  mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->IsInboundConn(), static_cast<const CService&>(pnode->addr));
2776              }
2777          }
2778      }
2779  
2780      for (const auto& addr : lAddresses) {
2781          CService service(LookupNumeric(addr.m_added_node, GetDefaultPort(addr.m_added_node)));
2782          AddedNodeInfo addedNode{addr, CService(), false, false};
2783          if (service.IsValid()) {
2784              // strAddNode is an IP:port
2785              auto it = mapConnected.find(service);
2786              if (it != mapConnected.end()) {
2787                  if (!include_connected) {
2788                      continue;
2789                  }
2790                  addedNode.resolvedAddress = service;
2791                  addedNode.fConnected = true;
2792                  addedNode.fInbound = it->second;
2793              }
2794          } else {
2795              // strAddNode is a name
2796              auto it = mapConnectedByName.find(addr.m_added_node);
2797              if (it != mapConnectedByName.end()) {
2798                  if (!include_connected) {
2799                      continue;
2800                  }
2801                  addedNode.resolvedAddress = it->second.second;
2802                  addedNode.fConnected = true;
2803                  addedNode.fInbound = it->second.first;
2804              }
2805          }
2806          ret.emplace_back(std::move(addedNode));
2807      }
2808  
2809      return ret;
2810  }
2811  
// Worker thread: periodically try to connect to every configured addnode
// entry that is not yet connected, gated by the addnode semaphore.
void CConnman::ThreadOpenAddedConnections()
{
    AssertLockNotHeld(m_unused_i2p_sessions_mutex);
    AssertLockNotHeld(m_reconnections_mutex);
    while (true)
    {
        // Block until an addnode connection slot is available.
        CSemaphoreGrant grant(*semAddnode);
        std::vector<AddedNodeInfo> vInfo = GetAddedNodeInfo(/*include_connected=*/false);
        bool tried = false;
        for (const AddedNodeInfo& info : vInfo) {
            if (!grant) {
                // If we've used up our semaphore and need a new one, let's not wait here since while we are waiting
                // the addednodeinfo state might change.
                break;
            }
            tried = true;
            CAddress addr(CService(), NODE_NONE);
            // The grant is moved into the new node on success; re-acquire a
            // fresh one below (non-blocking) for the next iteration.
            OpenNetworkConnection(addr, false, std::move(grant), info.m_params.m_added_node.c_str(), ConnectionType::MANUAL, info.m_params.m_use_v2transport);
            if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return;
            grant = CSemaphoreGrant(*semAddnode, /*fTry=*/true);
        }
        // See if any reconnections are desired.
        PerformReconnections();
        // Retry every 60 seconds if a connection was attempted, otherwise two seconds
        if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2)))
            return;
    }
}
2840  
// Initiate a single outbound connection to addrConnect (or, if pszDest is
// non-null, to that string destination). If successful, this moves the
// passed grant to the constructed node, registers the node with the message
// processor and appends it to m_nodes.
void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant&& grant_outbound, const char *pszDest, ConnectionType conn_type, bool use_v2transport)
{
    AssertLockNotHeld(m_unused_i2p_sessions_mutex);
    // Inbound connections are accepted elsewhere; this path only initiates.
    assert(conn_type != ConnectionType::INBOUND);

    //
    // Initiate outbound network connection
    //
    if (interruptNet) {
        return;
    }
    if (!fNetworkActive) {
        return;
    }
    if (!pszDest) {
        // Connecting by address: refuse our own, banned/discouraged, and
        // already-connected addresses.
        bool banned_or_discouraged = m_banman && (m_banman->IsDiscouraged(addrConnect) || m_banman->IsBanned(addrConnect));
        if (IsLocal(addrConnect) || banned_or_discouraged || AlreadyConnectedToAddress(addrConnect)) {
            return;
        }
    } else if (FindNode(std::string(pszDest)))
        // Connecting by name: skip if a peer with that name already exists.
        return;

    CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type, use_v2transport);

    if (!pnode)
        return;
    // Hand the semaphore grant over to the newly created node.
    pnode->grantOutbound = std::move(grant_outbound);

    m_msgproc->InitializeNode(*pnode, nLocalServices);
    {
        LOCK(m_nodes_mutex);
        m_nodes.push_back(pnode);

        // update connection count by network
        if (pnode->IsManualOrFullOutboundConn()) ++m_network_conn_counts[pnode->addr.GetNetwork()];
    }
}
2879  
2880  Mutex NetEventsInterface::g_msgproc_mutex;
2881  
// Worker thread: repeatedly run ProcessMessages()/SendMessages() for every
// peer until flagInterruptMsgProc is set. When no peer reported more work,
// sleeps up to 100ms or until woken via condMsgProc/fMsgProcWake.
void CConnman::ThreadMessageHandler()
{
    LOCK(NetEventsInterface::g_msgproc_mutex);

    while (!flagInterruptMsgProc)
    {
        bool fMoreWork = false;

        {
            // Randomize the order in which we process messages from/to our peers.
            // This prevents attacks in which an attacker exploits having multiple
            // consecutive connections in the m_nodes list.
            const NodesSnapshot snap{*this, /*shuffle=*/true};

            for (CNode* pnode : snap.Nodes()) {
                if (pnode->fDisconnect)
                    continue;

                // Receive messages
                bool fMoreNodeWork = m_msgproc->ProcessMessages(pnode, flagInterruptMsgProc);
                // A paused send buffer suppresses the "more work" signal so we
                // don't spin while the peer can't absorb more data.
                fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend);
                if (flagInterruptMsgProc)
                    return;
                // Send messages
                m_msgproc->SendMessages(pnode);

                if (flagInterruptMsgProc)
                    return;
            }
        }

        WAIT_LOCK(mutexMsgProc, lock);
        if (!fMoreWork) {
            condMsgProc.wait_until(lock, std::chrono::steady_clock::now() + std::chrono::milliseconds(100), [this]() EXCLUSIVE_LOCKS_REQUIRED(mutexMsgProc) { return fMsgProcWake; });
        }
        fMsgProcWake = false;
    }
}
2920  
// Worker thread: accept incoming I2P connections via the SAM session in a
// loop. Failed Listen()/Accept() calls back off by an extra second per
// failure (starting at 1s, capped at 5min); any success resets the wait.
void CConnman::ThreadI2PAcceptIncoming()
{
    static constexpr auto err_wait_begin = 1s;
    static constexpr auto err_wait_cap = 5min;
    auto err_wait = err_wait_begin;

    bool advertising_listen_addr = false;
    i2p::Connection conn;

    auto SleepOnFailure = [&]() {
        interruptNet.sleep_for(err_wait);
        if (err_wait < err_wait_cap) {
            err_wait += 1s;
        }
    };

    while (!interruptNet) {

        if (!m_i2p_sam_session->Listen(conn)) {
            // Stop advertising our I2P address while we cannot listen on it.
            if (advertising_listen_addr && conn.me.IsValid()) {
                RemoveLocal(conn.me);
                advertising_listen_addr = false;
            }
            SleepOnFailure();
            continue;
        }

        // Listening succeeded: advertise our own I2P listen address once.
        if (!advertising_listen_addr) {
            AddLocal(conn.me, LOCAL_MANUAL);
            advertising_listen_addr = true;
        }

        if (!m_i2p_sam_session->Accept(conn)) {
            SleepOnFailure();
            continue;
        }

        // Hand the accepted socket over as a regular inbound peer.
        CreateNodeFromAcceptedSocket(std::move(conn.sock), NetPermissionFlags::None,
                                     CAddress{conn.me, NODE_NONE}, CAddress{conn.peer, NODE_NONE});

        err_wait = err_wait_begin;
    }
}
2964  
// Create, configure, bind and listen on a socket for addrBind. On failure,
// fills strError and returns false. On success the listening socket is
// appended to vhListenSocket together with its permission flags.
bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, NetPermissionFlags permissions)
{
    int nOne = 1;

    // Create socket for listening for incoming connections
    struct sockaddr_storage sockaddr;
    socklen_t len = sizeof(sockaddr);
    if (!addrBind.GetSockAddr((struct sockaddr*)&sockaddr, &len))
    {
        strError = strprintf(Untranslated("Bind address family for %s not supported"), addrBind.ToStringAddrPort());
        LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
        return false;
    }

    std::unique_ptr<Sock> sock = CreateSock(addrBind.GetSAFamily());
    if (!sock) {
        strError = strprintf(Untranslated("Couldn't open socket for incoming connections (socket returned error %s)"), NetworkErrorString(WSAGetLastError()));
        LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
        return false;
    }

    // Allow binding if the port is still in TIME_WAIT state after
    // the program was closed and restarted.
    // Failure here is non-fatal: log and continue.
    if (sock->SetSockOpt(SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne, sizeof(int)) == SOCKET_ERROR) {
        strError = strprintf(Untranslated("Error setting SO_REUSEADDR on socket: %s, continuing anyway"), NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError.original);
    }

    // some systems don't have IPV6_V6ONLY but are always v6only; others do have the option
    // and enable it by default or not. Try to enable it, if possible.
    if (addrBind.IsIPv6()) {
#ifdef IPV6_V6ONLY
        if (sock->SetSockOpt(IPPROTO_IPV6, IPV6_V6ONLY, (sockopt_arg_type)&nOne, sizeof(int)) == SOCKET_ERROR) {
            strError = strprintf(Untranslated("Error setting IPV6_V6ONLY on socket: %s, continuing anyway"), NetworkErrorString(WSAGetLastError()));
            LogPrintf("%s\n", strError.original);
        }
#endif
#ifdef WIN32
        // Lift Windows' default IPv6 source-address restrictions; also
        // best-effort.
        int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED;
        if (sock->SetSockOpt(IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char*)&nProtLevel, sizeof(int)) == SOCKET_ERROR) {
            strError = strprintf(Untranslated("Error setting IPV6_PROTECTION_LEVEL on socket: %s, continuing anyway"), NetworkErrorString(WSAGetLastError()));
            LogPrintf("%s\n", strError.original);
        }
#endif
    }

    if (sock->Bind(reinterpret_cast<struct sockaddr*>(&sockaddr), len) == SOCKET_ERROR) {
        int nErr = WSAGetLastError();
        // EADDRINUSE gets a friendlier message since it usually means another
        // instance is already running.
        if (nErr == WSAEADDRINUSE)
            strError = strprintf(_("Unable to bind to %s on this computer. %s is probably already running."), addrBind.ToStringAddrPort(), PACKAGE_NAME);
        else
            strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %s)"), addrBind.ToStringAddrPort(), NetworkErrorString(nErr));
        LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
        return false;
    }
    LogPrintf("Bound to %s\n", addrBind.ToStringAddrPort());

    // Listen for incoming connections
    if (sock->Listen(SOMAXCONN) == SOCKET_ERROR)
    {
        strError = strprintf(_("Listening for incoming connections failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError()));
        LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
        return false;
    }

    vhListenSocket.emplace_back(std::move(sock), permissions);
    return true;
}
3033  
// Discover this machine's own addresses (via gethostname() on Windows,
// getifaddrs() elsewhere) and register them with AddLocal(LOCAL_IF).
// No-op unless -discover is enabled.
void Discover()
{
    if (!fDiscover)
        return;

#ifdef WIN32
    // Get local host IP
    char pszHostName[256] = "";
    if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR)
    {
        const std::vector<CNetAddr> addresses{LookupHost(pszHostName, 0, true)};
        for (const CNetAddr& addr : addresses)
        {
            if (AddLocal(addr, LOCAL_IF))
                LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToStringAddr());
        }
    }
#elif (HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS)
    // Get local host ip
    struct ifaddrs* myaddrs;
    if (getifaddrs(&myaddrs) == 0)
    {
        for (struct ifaddrs* ifa = myaddrs; ifa != nullptr; ifa = ifa->ifa_next)
        {
            // Skip interfaces without an address, interfaces that are down,
            // and the loopback interface ("lo" on Linux, "lo0" on BSD/macOS).
            if (ifa->ifa_addr == nullptr) continue;
            if ((ifa->ifa_flags & IFF_UP) == 0) continue;
            if (strcmp(ifa->ifa_name, "lo") == 0) continue;
            if (strcmp(ifa->ifa_name, "lo0") == 0) continue;
            if (ifa->ifa_addr->sa_family == AF_INET)
            {
                struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr);
                CNetAddr addr(s4->sin_addr);
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToStringAddr());
            }
            else if (ifa->ifa_addr->sa_family == AF_INET6)
            {
                struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr);
                CNetAddr addr(s6->sin6_addr);
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToStringAddr());
            }
        }
        freeifaddrs(myaddrs);
    }
#endif
}
3081  
3082  void CConnman::SetNetworkActive(bool active)
3083  {
3084      LogPrintf("%s: %s\n", __func__, active);
3085  
3086      if (fNetworkActive == active) {
3087          return;
3088      }
3089  
3090      fNetworkActive = active;
3091  
3092      if (m_client_interface) {
3093          m_client_interface->NotifyNetworkActiveChanged(fNetworkActive);
3094      }
3095  }
3096  
3097  CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In, AddrMan& addrman_in,
3098                     const NetGroupManager& netgroupman, const CChainParams& params, bool network_active)
3099      : addrman(addrman_in)
3100      , m_netgroupman{netgroupman}
3101      , nSeed0(nSeed0In)
3102      , nSeed1(nSeed1In)
3103      , m_params(params)
3104  {
3105      SetTryNewOutboundPeer(false);
3106  
3107      Options connOptions;
3108      Init(connOptions);
3109      SetNetworkActive(network_active);
3110  }
3111  
3112  NodeId CConnman::GetNewNodeId()
3113  {
3114      return nLastNodeId.fetch_add(1, std::memory_order_relaxed);
3115  }
3116  
3117  uint16_t CConnman::GetDefaultPort(Network net) const
3118  {
3119      return net == NET_I2P ? I2P_SAM31_PORT : m_params.GetDefaultPort();
3120  }
3121  
3122  uint16_t CConnman::GetDefaultPort(const std::string& addr) const
3123  {
3124      CNetAddr a;
3125      return a.SetSpecial(addr) ? GetDefaultPort(a.GetNetwork()) : m_params.GetDefaultPort();
3126  }
3127  
3128  bool CConnman::Bind(const CService& addr_, unsigned int flags, NetPermissionFlags permissions)
3129  {
3130      const CService addr{MaybeFlipIPv6toCJDNS(addr_)};
3131  
3132      bilingual_str strError;
3133      if (!BindListenPort(addr, strError, permissions)) {
3134          if ((flags & BF_REPORT_ERROR) && m_client_interface) {
3135              m_client_interface->ThreadSafeMessageBox(strError, "", CClientUIInterface::MSG_ERROR);
3136          }
3137          return false;
3138      }
3139  
3140      if (addr.IsRoutable() && fDiscover && !(flags & BF_DONT_ADVERTISE) && !NetPermissions::HasFlag(permissions, NetPermissionFlags::NoBan)) {
3141          AddLocal(addr, LOCAL_BIND);
3142      }
3143  
3144      return true;
3145  }
3146  
3147  bool CConnman::InitBinds(const Options& options)
3148  {
3149      bool fBound = false;
3150      for (const auto& addrBind : options.vBinds) {
3151          fBound |= Bind(addrBind, BF_REPORT_ERROR, NetPermissionFlags::None);
3152      }
3153      for (const auto& addrBind : options.vWhiteBinds) {
3154          fBound |= Bind(addrBind.m_service, BF_REPORT_ERROR, addrBind.m_flags);
3155      }
3156      for (const auto& addr_bind : options.onion_binds) {
3157          fBound |= Bind(addr_bind, BF_DONT_ADVERTISE, NetPermissionFlags::None);
3158      }
3159      if (options.bind_on_any) {
3160          struct in_addr inaddr_any;
3161          inaddr_any.s_addr = htonl(INADDR_ANY);
3162          struct in6_addr inaddr6_any = IN6ADDR_ANY_INIT;
3163          fBound |= Bind(CService(inaddr6_any, GetListenPort()), BF_NONE, NetPermissionFlags::None);
3164          fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE, NetPermissionFlags::None);
3165      }
3166      return fBound;
3167  }
3168  
// Bring up the network layer: create listening sockets, set up the optional
// I2P session, load anchors, initialize the connection semaphores and launch
// the worker threads. Returns false on a fatal configuration/bind error.
bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    Init(connOptions);

    if (fListen && !InitBinds(connOptions)) {
        if (m_client_interface) {
            m_client_interface->ThreadSafeMessageBox(
                _("Failed to listen on any port. Use -listen=0 if you want this."),
                "", CClientUIInterface::MSG_ERROR);
        }
        return false;
    }

    // Create the I2P SAM session if an I2P proxy is configured and incoming
    // I2P connections were requested.
    Proxy i2p_sam;
    if (GetProxy(NET_I2P, i2p_sam) && connOptions.m_i2p_accept_incoming) {
        m_i2p_sam_session = std::make_unique<i2p::sam::Session>(gArgs.GetDataDirNet() / "i2p_private_key",
                                                                i2p_sam, &interruptNet);
    }

    // Queue any -seednode destinations for address fetching.
    for (const auto& strDest : connOptions.vSeedNodes) {
        AddAddrFetch(strDest);
    }

    if (m_use_addrman_outgoing) {
        // Load addresses from anchors.dat
        m_anchors = ReadAnchors(gArgs.GetDataDirNet() / ANCHORS_DATABASE_FILENAME);
        if (m_anchors.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
            m_anchors.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
        }
        LogPrintf("%i block-relay-only anchors will be tried for connections.\n", m_anchors.size());
    }

    if (m_client_interface) {
        m_client_interface->InitMessage(_("Starting network threads…").translated);
    }

    fAddressesInitialized = true;

    if (semOutbound == nullptr) {
        // initialize semaphore
        semOutbound = std::make_unique<CSemaphore>(std::min(m_max_automatic_outbound, m_max_automatic_connections));
    }
    if (semAddnode == nullptr) {
        // initialize semaphore
        semAddnode = std::make_unique<CSemaphore>(m_max_addnode);
    }

    //
    // Start threads
    //
    assert(m_msgproc);
    interruptNet.reset();
    flagInterruptMsgProc = false;

    {
        LOCK(mutexMsgProc);
        fMsgProcWake = false;
    }

    // Send and receive from sockets, accept connections
    threadSocketHandler = std::thread(&util::TraceThread, "net", [this] { ThreadSocketHandler(); });

    if (!gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED))
        LogPrintf("DNS seeding disabled\n");
    else
        threadDNSAddressSeed = std::thread(&util::TraceThread, "dnsseed", [this] { ThreadDNSAddressSeed(); });

    // Initiate manual connections
    threadOpenAddedConnections = std::thread(&util::TraceThread, "addcon", [this] { ThreadOpenAddedConnections(); });

    // Using addrman for outgoing connections and an explicit -connect list
    // are mutually exclusive.
    if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) {
        if (m_client_interface) {
            m_client_interface->ThreadSafeMessageBox(
                _("Cannot provide specific connections and have addrman find outgoing connections at the same time."),
                "", CClientUIInterface::MSG_ERROR);
        }
        return false;
    }
    if (connOptions.m_use_addrman_outgoing || !connOptions.m_specified_outgoing.empty()) {
        threadOpenConnections = std::thread(
            &util::TraceThread, "opencon",
            [this, connect = connOptions.m_specified_outgoing] { ThreadOpenConnections(connect); });
    }

    // Process messages
    threadMessageHandler = std::thread(&util::TraceThread, "msghand", [this] { ThreadMessageHandler(); });

    if (m_i2p_sam_session) {
        threadI2PAcceptIncoming =
            std::thread(&util::TraceThread, "i2paccept", [this] { ThreadI2PAcceptIncoming(); });
    }

    // Dump network addresses
    scheduler.scheduleEvery([this] { DumpAddresses(); }, DUMP_PEERS_INTERVAL);

    // Run the ASMap Health check once and then schedule it to run every 24h.
    if (m_netgroupman.UsingASMap()) {
        ASMapHealthCheck();
        scheduler.scheduleEvery([this] { ASMapHealthCheck(); }, ASMAP_HEALTH_CHECK_INTERVAL);
    }

    return true;
}
3273  
// RAII helper whose only purpose is to run network-stack teardown at static
// destruction time (Windows only: WSACleanup()).
class CNetCleanup
{
public:
    CNetCleanup() = default;

    ~CNetCleanup()
    {
#ifdef WIN32
        // Shutdown Windows Sockets
        WSACleanup();
#endif
    }
};
// Single file-local instance; its destructor runs at program exit.
static CNetCleanup instance_of_cnetcleanup;
3288  
3289  void CConnman::Interrupt()
3290  {
3291      {
3292          LOCK(mutexMsgProc);
3293          flagInterruptMsgProc = true;
3294      }
3295      condMsgProc.notify_all();
3296  
3297      interruptNet();
3298      g_socks5_interrupt();
3299  
3300      if (semOutbound) {
3301          for (int i=0; i<m_max_automatic_outbound; i++) {
3302              semOutbound->post();
3303          }
3304      }
3305  
3306      if (semAddnode) {
3307          for (int i=0; i<m_max_addnode; i++) {
3308              semAddnode->post();
3309          }
3310      }
3311  }
3312  
3313  void CConnman::StopThreads()
3314  {
3315      if (threadI2PAcceptIncoming.joinable()) {
3316          threadI2PAcceptIncoming.join();
3317      }
3318      if (threadMessageHandler.joinable())
3319          threadMessageHandler.join();
3320      if (threadOpenConnections.joinable())
3321          threadOpenConnections.join();
3322      if (threadOpenAddedConnections.joinable())
3323          threadOpenAddedConnections.join();
3324      if (threadDNSAddressSeed.joinable())
3325          threadDNSAddressSeed.join();
3326      if (threadSocketHandler.joinable())
3327          threadSocketHandler.join();
3328  }
3329  
// Tear down all peer connections. On a clean shutdown (addresses were
// initialized) this first dumps the address database and, when addrman
// outgoing connections are in use, up to MAX_BLOCK_RELAY_ONLY_ANCHORS
// current block-relay-only peers as anchors.
void CConnman::StopNodes()
{
    if (fAddressesInitialized) {
        DumpAddresses();
        fAddressesInitialized = false;

        if (m_use_addrman_outgoing) {
            // Anchor connections are only dumped during clean shutdown.
            std::vector<CAddress> anchors_to_dump = GetCurrentBlockRelayOnlyConns();
            if (anchors_to_dump.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
                anchors_to_dump.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
            }
            DumpAnchors(gArgs.GetDataDirNet() / ANCHORS_DATABASE_FILENAME, anchors_to_dump);
        }
    }

    // Delete peer connections.
    // Swap the list out under the lock, then close/delete without holding it.
    std::vector<CNode*> nodes;
    WITH_LOCK(m_nodes_mutex, nodes.swap(m_nodes));
    for (CNode* pnode : nodes) {
        pnode->CloseSocketDisconnect();
        DeleteNode(pnode);
    }

    for (CNode* pnode : m_nodes_disconnected) {
        DeleteNode(pnode);
    }
    m_nodes_disconnected.clear();
    vhListenSocket.clear();
    semOutbound.reset();
    semAddnode.reset();
}
3362  
// Finalize a peer with the message processor and free it. pnode must be
// non-null; callers remove it from the node lists first (see StopNodes()).
void CConnman::DeleteNode(CNode* pnode)
{
    assert(pnode);
    m_msgproc->FinalizeNode(*pnode);
    delete pnode;
}
3369  
// Destructor: interrupt all network work first, then run the full Stop()
// sequence (threads joined, nodes deleted).
CConnman::~CConnman()
{
    Interrupt();
    Stop();
}
3375  
3376  std::vector<CAddress> CConnman::GetAddresses(size_t max_addresses, size_t max_pct, std::optional<Network> network, const bool filtered) const
3377  {
3378      std::vector<CAddress> addresses = addrman.GetAddr(max_addresses, max_pct, network, filtered);
3379      if (m_banman) {
3380          addresses.erase(std::remove_if(addresses.begin(), addresses.end(),
3381                          [this](const CAddress& addr){return m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr);}),
3382                          addresses.end());
3383      }
3384      return addresses;
3385  }
3386  
// Return an ADDR response for this requestor, served from a cache keyed by
// (network, local socket bytes, inbound local port). The cache entry lives
// roughly 21-27 hours; see the in-body comment for the privacy rationale.
std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addresses, size_t max_pct)
{
    auto local_socket_bytes = requestor.addrBind.GetAddrBytes();
    uint64_t cache_id = GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE)
        .Write(requestor.ConnectedThroughNetwork())
        .Write(local_socket_bytes)
        // For outbound connections, the port of the bound address is randomly
        // assigned by the OS and would therefore not be useful for seeding.
        .Write(requestor.IsInboundConn() ? requestor.addrBind.GetPort() : 0)
        .Finalize();
    const auto current_time = GetTime<std::chrono::microseconds>();
    auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{});
    CachedAddrResponse& cache_entry = r.first->second;
    if (cache_entry.m_cache_entry_expiration < current_time) { // If emplace() added new one it has expiration 0.
        cache_entry.m_addrs_response_cache = GetAddresses(max_addresses, max_pct, /*network=*/std::nullopt);
        // Choosing a proper cache lifetime is a trade-off between the privacy leak minimization
        // and the usefulness of ADDR responses to honest users.
        //
        // Longer cache lifetime makes it more difficult for an attacker to scrape
        // enough AddrMan data to maliciously infer something useful.
        // By the time an attacker scraped enough AddrMan records, most of
        // the records should be old enough to not leak topology info by
        // e.g. analyzing real-time changes in timestamps.
        //
        // It takes only several hundred requests to scrape everything from an AddrMan containing 100,000 nodes,
        // so ~24 hours of cache lifetime indeed makes the data less inferable by the time
        // most of it could be scraped (considering that timestamps are updated via
        // ADDR self-announcements and when nodes communicate).
        // We also should be robust to those attacks which may not require scraping *full* victim's AddrMan
        // (because even several timestamps of the same handful of nodes may leak privacy).
        //
        // On the other hand, longer cache lifetime makes ADDR responses
        // outdated and less useful for an honest requestor, e.g. if most nodes
        // in the ADDR response are no longer active.
        //
        // However, the churn in the network is known to be rather low. Since we consider
        // nodes to be "terrible" (see IsTerrible()) if the timestamps are older than 30 days,
        // max. 24 hours of "penalty" due to cache shouldn't make any meaningful difference
        // in terms of the freshness of the response.
        cache_entry.m_cache_entry_expiration = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6));
    }
    return cache_entry.m_addrs_response_cache;
}
3430  
3431  bool CConnman::AddNode(const AddedNodeParams& add)
3432  {
3433      const CService resolved(LookupNumeric(add.m_added_node, GetDefaultPort(add.m_added_node)));
3434      const bool resolved_is_valid{resolved.IsValid()};
3435  
3436      LOCK(m_added_nodes_mutex);
3437      for (const auto& it : m_added_node_params) {
3438          if (add.m_added_node == it.m_added_node || (resolved_is_valid && resolved == LookupNumeric(it.m_added_node, GetDefaultPort(it.m_added_node)))) return false;
3439      }
3440  
3441      m_added_node_params.push_back(add);
3442      return true;
3443  }
3444  
3445  bool CConnman::RemoveAddedNode(const std::string& strNode)
3446  {
3447      LOCK(m_added_nodes_mutex);
3448      for (auto it = m_added_node_params.begin(); it != m_added_node_params.end(); ++it) {
3449          if (strNode == it->m_added_node) {
3450              m_added_node_params.erase(it);
3451              return true;
3452          }
3453      }
3454      return false;
3455  }
3456  
3457  bool CConnman::AddedNodesContain(const CAddress& addr) const
3458  {
3459      AssertLockNotHeld(m_added_nodes_mutex);
3460      const std::string addr_str{addr.ToStringAddr()};
3461      const std::string addr_port_str{addr.ToStringAddrPort()};
3462      LOCK(m_added_nodes_mutex);
3463      return (m_added_node_params.size() < 24 // bound the query to a reasonable limit
3464              && std::any_of(m_added_node_params.cbegin(), m_added_node_params.cend(),
3465                             [&](const auto& p) { return p.m_added_node == addr_str || p.m_added_node == addr_port_str; }));
3466  }
3467  
3468  size_t CConnman::GetNodeCount(ConnectionDirection flags) const
3469  {
3470      LOCK(m_nodes_mutex);
3471      if (flags == ConnectionDirection::Both) // Shortcut if we want total
3472          return m_nodes.size();
3473  
3474      int nNum = 0;
3475      for (const auto& pnode : m_nodes) {
3476          if (flags & (pnode->IsInboundConn() ? ConnectionDirection::In : ConnectionDirection::Out)) {
3477              nNum++;
3478          }
3479      }
3480  
3481      return nNum;
3482  }
3483  
// Look up the mapped AS for addr; delegates to the NetGroupManager.
uint32_t CConnman::GetMappedAS(const CNetAddr& addr) const
{
    return m_netgroupman.GetMappedAS(addr);
}
3488  
3489  void CConnman::GetNodeStats(std::vector<CNodeStats>& vstats) const
3490  {
3491      vstats.clear();
3492      LOCK(m_nodes_mutex);
3493      vstats.reserve(m_nodes.size());
3494      for (CNode* pnode : m_nodes) {
3495          vstats.emplace_back();
3496          pnode->CopyStats(vstats.back());
3497          vstats.back().m_mapped_as = GetMappedAS(pnode->addr);
3498      }
3499  }
3500  
3501  bool CConnman::DisconnectNode(const std::string& strNode)
3502  {
3503      LOCK(m_nodes_mutex);
3504      if (CNode* pnode = FindNode(strNode)) {
3505          LogPrint(BCLog::NET, "disconnect by address%s matched peer=%d; disconnecting\n", (fLogIPs ? strprintf("=%s", strNode) : ""), pnode->GetId());
3506          pnode->fDisconnect = true;
3507          return true;
3508      }
3509      return false;
3510  }
3511  
3512  bool CConnman::DisconnectNode(const CSubNet& subnet)
3513  {
3514      bool disconnected = false;
3515      LOCK(m_nodes_mutex);
3516      for (CNode* pnode : m_nodes) {
3517          if (subnet.Match(pnode->addr)) {
3518              LogPrint(BCLog::NET, "disconnect by subnet%s matched peer=%d; disconnecting\n", (fLogIPs ? strprintf("=%s", subnet.ToString()) : ""), pnode->GetId());
3519              pnode->fDisconnect = true;
3520              disconnected = true;
3521          }
3522      }
3523      return disconnected;
3524  }
3525  
3526  bool CConnman::DisconnectNode(const CNetAddr& addr)
3527  {
3528      return DisconnectNode(CSubNet(addr));
3529  }
3530  
3531  bool CConnman::DisconnectNode(NodeId id)
3532  {
3533      LOCK(m_nodes_mutex);
3534      for(CNode* pnode : m_nodes) {
3535          if (id == pnode->GetId()) {
3536              LogPrint(BCLog::NET, "disconnect by id peer=%d; disconnecting\n", pnode->GetId());
3537              pnode->fDisconnect = true;
3538              return true;
3539          }
3540      }
3541      return false;
3542  }
3543  
// Account for received bytes in the running total.
void CConnman::RecordBytesRecv(uint64_t bytes)
{
    nTotalBytesRecv += bytes;
}
3548  
// Account for sent bytes in the running total and in the current upload
// cycle's counter, starting a fresh cycle when the timeframe has elapsed.
// Caller must not hold m_total_bytes_sent_mutex.
void CConnman::RecordBytesSent(uint64_t bytes)
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);

    nTotalBytesSent += bytes;

    const auto now = GetTime<std::chrono::seconds>();
    if (nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME < now)
    {
        // timeframe expired, reset cycle
        nMaxOutboundCycleStartTime = now;
        nMaxOutboundTotalBytesSentInCycle = 0;
    }

    nMaxOutboundTotalBytesSentInCycle += bytes;
}
3566  
// Return the configured outbound traffic target in bytes. A value of 0
// disables the target (see OutboundTargetReached / GetOutboundTargetBytesLeft).
uint64_t CConnman::GetMaxOutboundTarget() const
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);
    return nMaxOutboundLimit;
}
3573  
// Length of one upload-target measurement cycle (a compile-time constant,
// so no locking is needed).
std::chrono::seconds CConnman::GetMaxOutboundTimeframe() const
{
    return MAX_UPLOAD_TIMEFRAME;
}
3578  
// Thread-safe wrapper: acquires the send-stats lock and defers to the
// lock-held helper GetMaxOutboundTimeLeftInCycle_().
std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle() const
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);
    return GetMaxOutboundTimeLeftInCycle_();
}
3585  
3586  std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle_() const
3587  {
3588      AssertLockHeld(m_total_bytes_sent_mutex);
3589  
3590      if (nMaxOutboundLimit == 0)
3591          return 0s;
3592  
3593      if (nMaxOutboundCycleStartTime.count() == 0)
3594          return MAX_UPLOAD_TIMEFRAME;
3595  
3596      const std::chrono::seconds cycleEndTime = nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME;
3597      const auto now = GetTime<std::chrono::seconds>();
3598      return (cycleEndTime < now) ? 0s : cycleEndTime - now;
3599  }
3600  
3601  bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) const
3602  {
3603      AssertLockNotHeld(m_total_bytes_sent_mutex);
3604      LOCK(m_total_bytes_sent_mutex);
3605      if (nMaxOutboundLimit == 0)
3606          return false;
3607  
3608      if (historicalBlockServingLimit)
3609      {
3610          // keep a large enough buffer to at least relay each block once
3611          const std::chrono::seconds timeLeftInCycle = GetMaxOutboundTimeLeftInCycle_();
3612          const uint64_t buffer = timeLeftInCycle / std::chrono::minutes{10} * MAX_BLOCK_SERIALIZED_SIZE;
3613          if (buffer >= nMaxOutboundLimit || nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer)
3614              return true;
3615      }
3616      else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit)
3617          return true;
3618  
3619      return false;
3620  }
3621  
3622  uint64_t CConnman::GetOutboundTargetBytesLeft() const
3623  {
3624      AssertLockNotHeld(m_total_bytes_sent_mutex);
3625      LOCK(m_total_bytes_sent_mutex);
3626      if (nMaxOutboundLimit == 0)
3627          return 0;
3628  
3629      return (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) ? 0 : nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle;
3630  }
3631  
// Process-lifetime total of bytes received. Read without the send-stats
// lock; presumably nTotalBytesRecv synchronizes itself — see its declaration.
uint64_t CConnman::GetTotalBytesRecv() const
{
    return nTotalBytesRecv;
}
3636  
// Process-lifetime total of bytes sent, read under the send-stats lock.
uint64_t CConnman::GetTotalBytesSent() const
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);
    return nTotalBytesSent;
}
3643  
// Service flags this node advertises to peers.
ServiceFlags CConnman::GetLocalServices() const
{
    return nLocalServices;
}
3648  
3649  static std::unique_ptr<Transport> MakeTransport(NodeId id, bool use_v2transport, bool inbound) noexcept
3650  {
3651      if (use_v2transport) {
3652          return std::make_unique<V2Transport>(id, /*initiating=*/!inbound);
3653      } else {
3654          return std::make_unique<V1Transport>(id);
3655      }
3656  }
3657  
// Construct a peer object around an established (or pending) socket.
// The transport (v1 or v2) is chosen from node_opts.use_v2transport; for a v2
// transport we take the initiating role unless the connection is inbound.
// addrNameIn may be empty, in which case the printable address:port is used.
CNode::CNode(NodeId idIn,
             std::shared_ptr<Sock> sock,
             const CAddress& addrIn,
             uint64_t nKeyedNetGroupIn,
             uint64_t nLocalHostNonceIn,
             const CAddress& addrBindIn,
             const std::string& addrNameIn,
             ConnectionType conn_type_in,
             bool inbound_onion,
             CNodeOptions&& node_opts)
    : m_transport{MakeTransport(idIn, node_opts.use_v2transport, conn_type_in == ConnectionType::INBOUND)},
      m_permission_flags{node_opts.permission_flags},
      m_sock{sock},
      m_connected{GetTime<std::chrono::seconds>()},
      addr{addrIn},
      addrBind{addrBindIn},
      m_addr_name{addrNameIn.empty() ? addr.ToStringAddrPort() : addrNameIn},
      m_dest(addrNameIn),
      m_inbound_onion{inbound_onion},
      m_prefer_evict{node_opts.prefer_evict},
      nKeyedNetGroup{nKeyedNetGroupIn},
      m_conn_type{conn_type_in},
      id{idIn},
      nLocalHostNonce{nLocalHostNonceIn},
      m_recv_flood_size{node_opts.recv_flood_size},
      m_i2p_sam_session{std::move(node_opts.i2p_sam_session)}
{
    // An inbound onion peer must, by definition, be an inbound connection.
    if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND);

    // Pre-populate the per-message-type receive counters so that every known
    // message type (plus the catch-all "other" bucket) has an entry.
    for (const std::string &msg : getAllNetMessageTypes())
        mapRecvBytesPerMsgType[msg] = 0;
    mapRecvBytesPerMsgType[NET_MESSAGE_TYPE_OTHER] = 0;

    if (fLogIPs) {
        LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", m_addr_name, id);
    } else {
        LogPrint(BCLog::NET, "Added connection peer=%d\n", id);
    }
}
3697  
3698  void CNode::MarkReceivedMsgsForProcessing()
3699  {
3700      AssertLockNotHeld(m_msg_process_queue_mutex);
3701  
3702      size_t nSizeAdded = 0;
3703      for (const auto& msg : vRecvMsg) {
3704          // vRecvMsg contains only completed CNetMessage
3705          // the single possible partially deserialized message are held by TransportDeserializer
3706          nSizeAdded += msg.m_raw_message_size;
3707      }
3708  
3709      LOCK(m_msg_process_queue_mutex);
3710      m_msg_process_queue.splice(m_msg_process_queue.end(), vRecvMsg);
3711      m_msg_process_queue_size += nSizeAdded;
3712      fPauseRecv = m_msg_process_queue_size > m_recv_flood_size;
3713  }
3714  
// Pop one message from the processing queue.
// Returns std::nullopt when the queue is empty; otherwise the message and a
// flag indicating whether more messages remain to be processed.
std::optional<std::pair<CNetMessage, bool>> CNode::PollMessage()
{
    LOCK(m_msg_process_queue_mutex);
    if (m_msg_process_queue.empty()) return std::nullopt;

    std::list<CNetMessage> msgs;
    // Just take one message: splice avoids copying the CNetMessage, and the
    // size accounting is done before the message is moved out below.
    msgs.splice(msgs.begin(), m_msg_process_queue, m_msg_process_queue.begin());
    m_msg_process_queue_size -= msgs.front().m_raw_message_size;
    fPauseRecv = m_msg_process_queue_size > m_recv_flood_size;

    return std::make_pair(std::move(msgs.front()), !m_msg_process_queue.empty());
}
3728  
3729  bool CConnman::NodeFullyConnected(const CNode* pnode)
3730  {
3731      return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect;
3732  }
3733  
// Queue a serialized message for sending to a peer, and attempt an immediate
// ("optimistic") send when the peer's send queue was previously empty.
// Takes ownership of msg. Updates send-buffer accounting and may set
// fPauseSend when the buffer exceeds nSendBufferMaxSize.
void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    size_t nMessageSize = msg.data.size();
    LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", msg.m_type, nMessageSize, pnode->GetId());
    if (gArgs.GetBoolArg("-capturemessages", false)) {
        CaptureMessage(pnode->addr, msg.m_type, msg.data, /*is_incoming=*/false);
    }

    TRACE6(net, outbound_message,
        pnode->GetId(),
        pnode->m_addr_name.c_str(),
        pnode->ConnectionTypeAsString().c_str(),
        msg.m_type.c_str(),
        msg.data.size(),
        msg.data.data()
    );

    size_t nBytesSent = 0;
    {
        LOCK(pnode->cs_vSend);
        // Check if the transport still has unsent bytes, and indicate to it that we're about to
        // give it a message to send.
        const auto& [to_send, more, _msg_type] =
            pnode->m_transport->GetBytesToSend(/*have_next_message=*/true);
        const bool queue_was_empty{to_send.empty() && pnode->vSendMsg.empty()};

        // Update memory usage of send buffer.
        pnode->m_send_memusage += msg.GetMemoryUsage();
        if (pnode->m_send_memusage + pnode->m_transport->GetSendMemoryUsage() > nSendBufferMaxSize) pnode->fPauseSend = true;
        // Move message to vSendMsg queue.
        pnode->vSendMsg.push_back(std::move(msg));

        // If there was nothing to send before, and there is now (predicted by the "more" value
        // returned by the GetBytesToSend call above), attempt "optimistic write":
        // because the poll/select loop may pause for SELECT_TIMEOUT_MILLISECONDS before actually
        // doing a send, try sending from the calling thread if the queue was empty before.
        // With a V1Transport, more will always be true here, because adding a message always
        // results in sendable bytes there, but with V2Transport this is not the case (it may
        // still be in the handshake).
        if (queue_was_empty && more) {
            std::tie(nBytesSent, std::ignore) = SocketSendData(*pnode);
        }
    }
    // Record outside cs_vSend: RecordBytesSent takes m_total_bytes_sent_mutex.
    if (nBytesSent) RecordBytesSent(nBytesSent);
}
3780  
3781  bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func)
3782  {
3783      CNode* found = nullptr;
3784      LOCK(m_nodes_mutex);
3785      for (auto&& pnode : m_nodes) {
3786          if(pnode->GetId() == id) {
3787              found = pnode;
3788              break;
3789          }
3790      }
3791      return found != nullptr && NodeFullyConnected(found) && func(found);
3792  }
3793  
// SipHasher seeded with this node's secret seeds (nSeed0/nSeed1) and the
// given domain-separation id, for deterministic-but-unpredictable hashing.
CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const
{
    return CSipHasher(nSeed0, nSeed1).Write(id);
}
3798  
3799  uint64_t CConnman::CalculateKeyedNetGroup(const CAddress& address) const
3800  {
3801      std::vector<unsigned char> vchNetGroup(m_netgroupman.GetGroup(address));
3802  
3803      return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup).Finalize();
3804  }
3805  
// Drain m_reconnections, opening a new connection for each queued entry.
// Entries are taken one at a time so the reconnection lock is never held
// across OpenNetworkConnection (which may block or take other locks).
void CConnman::PerformReconnections()
{
    AssertLockNotHeld(m_reconnections_mutex);
    AssertLockNotHeld(m_unused_i2p_sessions_mutex);
    while (true) {
        // Move first element of m_reconnections to todo (avoiding an allocation inside the lock).
        decltype(m_reconnections) todo;
        {
            LOCK(m_reconnections_mutex);
            if (m_reconnections.empty()) break;
            todo.splice(todo.end(), m_reconnections, m_reconnections.begin());
        }

        auto& item = *todo.begin();
        OpenNetworkConnection(item.addr_connect,
                              // We only reconnect if the first attempt to connect succeeded at
                              // connection time, but then failed after the CNode object was
                              // created. Since we already know connecting is possible, do not
                              // count failure to reconnect.
                              /*fCountFailure=*/false,
                              std::move(item.grant),
                              item.destination.empty() ? nullptr : item.destination.c_str(),
                              item.conn_type,
                              item.use_v2transport);
    }
}
3832  
3833  void CConnman::ASMapHealthCheck()
3834  {
3835      const std::vector<CAddress> v4_addrs{GetAddresses(/*max_addresses=*/ 0, /*max_pct=*/ 0, Network::NET_IPV4, /*filtered=*/ false)};
3836      const std::vector<CAddress> v6_addrs{GetAddresses(/*max_addresses=*/ 0, /*max_pct=*/ 0, Network::NET_IPV6, /*filtered=*/ false)};
3837      std::vector<CNetAddr> clearnet_addrs;
3838      clearnet_addrs.reserve(v4_addrs.size() + v6_addrs.size());
3839      std::transform(v4_addrs.begin(), v4_addrs.end(), std::back_inserter(clearnet_addrs),
3840          [](const CAddress& addr) { return static_cast<CNetAddr>(addr); });
3841      std::transform(v6_addrs.begin(), v6_addrs.end(), std::back_inserter(clearnet_addrs),
3842          [](const CAddress& addr) { return static_cast<CNetAddr>(addr); });
3843      m_netgroupman.ASMapHealthCheck(clearnet_addrs);
3844  }
3845  
3846  // Dump binary message to file, with timestamp.
3847  static void CaptureMessageToFile(const CAddress& addr,
3848                                   const std::string& msg_type,
3849                                   Span<const unsigned char> data,
3850                                   bool is_incoming)
3851  {
3852      // Note: This function captures the message at the time of processing,
3853      // not at socket receive/send time.
3854      // This ensures that the messages are always in order from an application
3855      // layer (processing) perspective.
3856      auto now = GetTime<std::chrono::microseconds>();
3857  
3858      // Windows folder names cannot include a colon
3859      std::string clean_addr = addr.ToStringAddrPort();
3860      std::replace(clean_addr.begin(), clean_addr.end(), ':', '_');
3861  
3862      fs::path base_path = gArgs.GetDataDirNet() / "message_capture" / fs::u8path(clean_addr);
3863      fs::create_directories(base_path);
3864  
3865      fs::path path = base_path / (is_incoming ? "msgs_recv.dat" : "msgs_sent.dat");
3866      AutoFile f{fsbridge::fopen(path, "ab")};
3867  
3868      ser_writedata64(f, now.count());
3869      f << Span{msg_type};
3870      for (auto i = msg_type.length(); i < CMessageHeader::COMMAND_SIZE; ++i) {
3871          f << uint8_t{'\0'};
3872      }
3873      uint32_t size = data.size();
3874      ser_writedata32(f, size);
3875      f << data;
3876  }
3877  
// Message-capture hook; defaults to CaptureMessageToFile. Being a
// std::function it can be reassigned (presumably by tests — verify callers).
std::function<void(const CAddress& addr,
                   const std::string& msg_type,
                   Span<const unsigned char> data,
                   bool is_incoming)>
    CaptureMessage = CaptureMessageToFile;