net.cpp
1 // Copyright (c) 2009-2010 Satoshi Nakamoto 2 // Copyright (c) 2009-present The Bitcoin Core developers 3 // Distributed under the MIT software license, see the accompanying 4 // file COPYING or http://www.opensource.org/licenses/mit-license.php. 5 6 #include <bitcoin-build-config.h> // IWYU pragma: keep 7 8 #include <net.h> 9 10 #include <addrdb.h> 11 #include <addrman.h> 12 #include <banman.h> 13 #include <clientversion.h> 14 #include <common/args.h> 15 #include <common/netif.h> 16 #include <compat/compat.h> 17 #include <consensus/consensus.h> 18 #include <crypto/sha256.h> 19 #include <i2p.h> 20 #include <key.h> 21 #include <logging.h> 22 #include <memusage.h> 23 #include <net_permissions.h> 24 #include <netaddress.h> 25 #include <netbase.h> 26 #include <node/eviction.h> 27 #include <node/interface_ui.h> 28 #include <protocol.h> 29 #include <random.h> 30 #include <scheduler.h> 31 #include <util/fs.h> 32 #include <util/sock.h> 33 #include <util/strencodings.h> 34 #include <util/thread.h> 35 #include <util/threadinterrupt.h> 36 #include <util/trace.h> 37 #include <util/translation.h> 38 #include <util/vector.h> 39 40 #ifdef WIN32 41 #include <string.h> 42 #endif 43 44 #if HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS 45 #include <ifaddrs.h> 46 #endif 47 48 #include <algorithm> 49 #include <array> 50 #include <cmath> 51 #include <cstdint> 52 #include <functional> 53 #include <optional> 54 #include <unordered_map> 55 56 TRACEPOINT_SEMAPHORE(net, closed_connection); 57 TRACEPOINT_SEMAPHORE(net, evicted_inbound_connection); 58 TRACEPOINT_SEMAPHORE(net, inbound_connection); 59 TRACEPOINT_SEMAPHORE(net, outbound_connection); 60 TRACEPOINT_SEMAPHORE(net, outbound_message); 61 62 /** Maximum number of block-relay-only anchor connections */ 63 static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2; 64 static_assert (MAX_BLOCK_RELAY_ONLY_ANCHORS <= static_cast<size_t>(MAX_BLOCK_RELAY_ONLY_CONNECTIONS), "MAX_BLOCK_RELAY_ONLY_ANCHORS must not exceed MAX_BLOCK_RELAY_ONLY_CONNECTIONS."); 65 /** Anchor IP address database file name */ 66 const char* const ANCHORS_DATABASE_FILENAME = "anchors.dat"; 67 68 // How often to dump addresses to peers.dat 69 static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15}; 70 71 /** Number of DNS seeds to query when the number of connections is low. */ 72 static constexpr int DNSSEEDS_TO_QUERY_AT_ONCE = 3; 73 74 /** Minimum number of outbound connections under which we will keep fetching our address seeds. */ 75 static constexpr int SEED_OUTBOUND_CONNECTION_THRESHOLD = 2; 76 77 /** How long to delay before querying DNS seeds 78 * 79 * If we have more than THRESHOLD entries in addrman, then it's likely 80 * that we got those addresses from having previously connected to the P2P 81 * network, and that we'll be able to successfully reconnect to the P2P 82 * network via contacting one of them. So if that's the case, spend a 83 * little longer trying to connect to known peers before querying the 84 * DNS seeds. 85 */ 86 static constexpr std::chrono::seconds DNSSEEDS_DELAY_FEW_PEERS{11}; 87 static constexpr std::chrono::minutes DNSSEEDS_DELAY_MANY_PEERS{5}; 88 static constexpr int DNSSEEDS_DELAY_PEER_THRESHOLD = 1000; // "many" vs "few" peers 89 90 /** The default timeframe for -maxuploadtarget. 1 day. */ 91 static constexpr std::chrono::seconds MAX_UPLOAD_TIMEFRAME{60 * 60 * 24}; 92 93 // A random time period (0 to 1 seconds) is added to feeler connections to prevent synchronization. 94 static constexpr auto FEELER_SLEEP_WINDOW{1s}; 95 96 /** Frequency to attempt extra connections to reachable networks we're not connected to yet **/ 97 static constexpr auto EXTRA_NETWORK_PEER_INTERVAL{5min}; 98 99 /** Used to pass flags to the Bind() function */ 100 enum BindFlags { 101 BF_NONE = 0, 102 BF_REPORT_ERROR = (1U << 0), 103 /** 104 * Do not call AddLocal() for our special addresses, e.g., for incoming 105 * Tor connections, to prevent gossiping them over the network. 106 */ 107 BF_DONT_ADVERTISE = (1U << 1), 108 }; 109 110 // The set of sockets cannot be modified while waiting 111 // The sleep time needs to be small to avoid new sockets stalling 112 static const uint64_t SELECT_TIMEOUT_MILLISECONDS = 50; 113 114 const std::string NET_MESSAGE_TYPE_OTHER = "*other*"; 115 116 static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("netgroup")[0:8] 117 static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // SHA256("localhostnonce")[0:8] 118 static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256("addrcache")[0:8] 119 // 120 // Global state variables 121 // 122 bool fDiscover = true; 123 bool fListen = true; 124 GlobalMutex g_maplocalhost_mutex; 125 std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex); 126 std::string strSubVersion; 127 128 size_t CSerializedNetMsg::GetMemoryUsage() const noexcept 129 { 130 return sizeof(*this) + memusage::DynamicUsage(m_type) + memusage::DynamicUsage(data); 131 } 132 133 size_t CNetMessage::GetMemoryUsage() const noexcept 134 { 135 return sizeof(*this) + memusage::DynamicUsage(m_type) + m_recv.GetMemoryUsage(); 136 } 137 138 void CConnman::AddAddrFetch(const std::string& strDest) 139 { 140 LOCK(m_addr_fetches_mutex); 141 m_addr_fetches.push_back(strDest); 142 } 143 144 uint16_t GetListenPort() 145 { 146 // If -bind= is provided with ":port" part, use that (first one if multiple are provided). 147 for (const std::string& bind_arg : gArgs.GetArgs("-bind")) { 148 constexpr uint16_t dummy_port = 0; 149 150 const std::optional<CService> bind_addr{Lookup(bind_arg, dummy_port, /*fAllowLookup=*/false)}; 151 if (bind_addr.has_value() && bind_addr->GetPort() != dummy_port) return bind_addr->GetPort(); 152 } 153 154 // Otherwise, if -whitebind= without NetPermissionFlags::NoBan is provided, use that 155 // (-whitebind= is required to have ":port"). 156 for (const std::string& whitebind_arg : gArgs.GetArgs("-whitebind")) { 157 NetWhitebindPermissions whitebind; 158 bilingual_str error; 159 if (NetWhitebindPermissions::TryParse(whitebind_arg, whitebind, error)) { 160 if (!NetPermissions::HasFlag(whitebind.m_flags, NetPermissionFlags::NoBan)) { 161 return whitebind.m_service.GetPort(); 162 } 163 } 164 } 165 166 // Otherwise, if -port= is provided, use that. Otherwise use the default port. 167 return static_cast<uint16_t>(gArgs.GetIntArg("-port", Params().GetDefaultPort())); 168 } 169 170 // Determine the "best" local address for a particular peer. 171 [[nodiscard]] static std::optional<CService> GetLocal(const CNode& peer) 172 { 173 if (!fListen) return std::nullopt; 174 175 std::optional<CService> addr; 176 int nBestScore = -1; 177 int nBestReachability = -1; 178 { 179 LOCK(g_maplocalhost_mutex); 180 for (const auto& [local_addr, local_service_info] : mapLocalHost) { 181 // For privacy reasons, don't advertise our privacy-network address 182 // to other networks and don't advertise our other-network address 183 // to privacy networks. 184 if (local_addr.GetNetwork() != peer.ConnectedThroughNetwork() 185 && (local_addr.IsPrivacyNet() || peer.IsConnectedThroughPrivacyNet())) { 186 continue; 187 } 188 const int nScore{local_service_info.nScore}; 189 const int nReachability{local_addr.GetReachabilityFrom(peer.addr)}; 190 if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) { 191 addr.emplace(CService{local_addr, local_service_info.nPort}); 192 nBestReachability = nReachability; 193 nBestScore = nScore; 194 } 195 } 196 } 197 return addr; 198 } 199 200 //! Convert the serialized seeds into usable address objects. 201 static std::vector<CAddress> ConvertSeeds(const std::vector<uint8_t> &vSeedsIn) 202 { 203 // It'll only connect to one or two seed nodes because once it connects, 204 // it'll get a pile of addresses with newer timestamps. 205 // Seed nodes are given a random 'last seen time' of between one and two 206 // weeks ago. 207 const auto one_week{7 * 24h}; 208 std::vector<CAddress> vSeedsOut; 209 FastRandomContext rng; 210 ParamsStream s{DataStream{vSeedsIn}, CAddress::V2_NETWORK}; 211 while (!s.eof()) { 212 CService endpoint; 213 s >> endpoint; 214 CAddress addr{endpoint, SeedsServiceFlags()}; 215 addr.nTime = rng.rand_uniform_delay(Now<NodeSeconds>() - one_week, -one_week); 216 LogDebug(BCLog::NET, "Added hardcoded seed: %s\n", addr.ToStringAddrPort()); 217 vSeedsOut.push_back(addr); 218 } 219 return vSeedsOut; 220 } 221 222 // Determine the "best" local address for a particular peer. 223 // If none, return the unroutable 0.0.0.0 but filled in with 224 // the normal parameters, since the IP may be changed to a useful 225 // one by discovery. 226 CService GetLocalAddress(const CNode& peer) 227 { 228 return GetLocal(peer).value_or(CService{CNetAddr(), GetListenPort()}); 229 } 230 231 static int GetnScore(const CService& addr) 232 { 233 LOCK(g_maplocalhost_mutex); 234 const auto it = mapLocalHost.find(addr); 235 return (it != mapLocalHost.end()) ? it->second.nScore : 0; 236 } 237 238 // Is our peer's addrLocal potentially useful as an external IP source? 239 [[nodiscard]] static bool IsPeerAddrLocalGood(CNode *pnode) 240 { 241 CService addrLocal = pnode->GetAddrLocal(); 242 return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() && 243 g_reachable_nets.Contains(addrLocal); 244 } 245 246 std::optional<CService> GetLocalAddrForPeer(CNode& node) 247 { 248 CService addrLocal{GetLocalAddress(node)}; 249 // If discovery is enabled, sometimes give our peer the address it 250 // tells us that it sees us as in case it has a better idea of our 251 // address than we do. 252 FastRandomContext rng; 253 if (IsPeerAddrLocalGood(&node) && (!addrLocal.IsRoutable() || 254 rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) 255 { 256 if (node.IsInboundConn()) { 257 // For inbound connections, assume both the address and the port 258 // as seen from the peer. 259 addrLocal = CService{node.GetAddrLocal()}; 260 } else { 261 // For outbound connections, assume just the address as seen from 262 // the peer and leave the port in `addrLocal` as returned by 263 // `GetLocalAddress()` above. The peer has no way to observe our 264 // listening port when we have initiated the connection. 265 addrLocal.SetIP(node.GetAddrLocal()); 266 } 267 } 268 if (addrLocal.IsRoutable()) { 269 LogDebug(BCLog::NET, "Advertising address %s to peer=%d\n", addrLocal.ToStringAddrPort(), node.GetId()); 270 return addrLocal; 271 } 272 // Address is unroutable. Don't advertise. 273 return std::nullopt; 274 } 275 276 // learn a new local address 277 bool AddLocal(const CService& addr_, int nScore) 278 { 279 CService addr{MaybeFlipIPv6toCJDNS(addr_)}; 280 281 if (!addr.IsRoutable()) 282 return false; 283 284 if (!fDiscover && nScore < LOCAL_MANUAL) 285 return false; 286 287 if (!g_reachable_nets.Contains(addr)) 288 return false; 289 290 LogPrintf("AddLocal(%s,%i)\n", addr.ToStringAddrPort(), nScore); 291 292 { 293 LOCK(g_maplocalhost_mutex); 294 const auto [it, is_newly_added] = mapLocalHost.emplace(addr, LocalServiceInfo()); 295 LocalServiceInfo &info = it->second; 296 if (is_newly_added || nScore >= info.nScore) { 297 info.nScore = nScore + (is_newly_added ? 0 : 1); 298 info.nPort = addr.GetPort(); 299 } 300 } 301 302 return true; 303 } 304 305 bool AddLocal(const CNetAddr &addr, int nScore) 306 { 307 return AddLocal(CService(addr, GetListenPort()), nScore); 308 } 309 310 void RemoveLocal(const CService& addr) 311 { 312 LOCK(g_maplocalhost_mutex); 313 LogPrintf("RemoveLocal(%s)\n", addr.ToStringAddrPort()); 314 mapLocalHost.erase(addr); 315 } 316 317 /** vote for a local address */ 318 bool SeenLocal(const CService& addr) 319 { 320 LOCK(g_maplocalhost_mutex); 321 const auto it = mapLocalHost.find(addr); 322 if (it == mapLocalHost.end()) return false; 323 ++it->second.nScore; 324 return true; 325 } 326 327 328 /** check whether a given address is potentially local */ 329 bool IsLocal(const CService& addr) 330 { 331 LOCK(g_maplocalhost_mutex); 332 return mapLocalHost.count(addr) > 0; 333 } 334 335 CNode* CConnman::FindNode(const CNetAddr& ip) 336 { 337 LOCK(m_nodes_mutex); 338 for (CNode* pnode : m_nodes) { 339 if (static_cast<CNetAddr>(pnode->addr) == ip) { 340 return pnode; 341 } 342 } 343 return nullptr; 344 } 345 346 CNode* CConnman::FindNode(const std::string& addrName) 347 { 348 LOCK(m_nodes_mutex); 349 for (CNode* pnode : m_nodes) { 350 if (pnode->m_addr_name == addrName) { 351 return pnode; 352 } 353 } 354 return nullptr; 355 } 356 357 CNode* CConnman::FindNode(const CService& addr) 358 { 359 LOCK(m_nodes_mutex); 360 for (CNode* pnode : m_nodes) { 361 if (static_cast<CService>(pnode->addr) == addr) { 362 return pnode; 363 } 364 } 365 return nullptr; 366 } 367 368 bool CConnman::AlreadyConnectedToAddress(const CAddress& addr) 369 { 370 return FindNode(static_cast<CNetAddr>(addr)); 371 } 372 373 bool CConnman::CheckIncomingNonce(uint64_t nonce) 374 { 375 LOCK(m_nodes_mutex); 376 for (const CNode* pnode : m_nodes) { 377 if (!pnode->fSuccessfullyConnected && !pnode->IsInboundConn() && pnode->GetLocalNonce() == nonce) 378 return false; 379 } 380 return true; 381 } 382 383 /** Get the bind address for a socket as CService. */ 384 static CService GetBindAddress(const Sock& sock) 385 { 386 CService addr_bind; 387 struct sockaddr_storage sockaddr_bind; 388 socklen_t sockaddr_bind_len = sizeof(sockaddr_bind); 389 if (!sock.GetSockName((struct sockaddr*)&sockaddr_bind, &sockaddr_bind_len)) { 390 addr_bind.SetSockAddr((const struct sockaddr*)&sockaddr_bind, sockaddr_bind_len); 391 } else { 392 LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "getsockname failed\n"); 393 } 394 return addr_bind; 395 } 396 397 CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) 398 { 399 AssertLockNotHeld(m_unused_i2p_sessions_mutex); 400 assert(conn_type != ConnectionType::INBOUND); 401 402 if (pszDest == nullptr) { 403 if (IsLocal(addrConnect)) 404 return nullptr; 405 406 // Look for an existing connection 407 CNode* pnode = FindNode(static_cast<CService>(addrConnect)); 408 if (pnode) 409 { 410 LogPrintf("Failed to open new connection, already connected\n"); 411 return nullptr; 412 } 413 } 414 415 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "trying %s connection %s lastseen=%.1fhrs\n", 416 use_v2transport ? "v2" : "v1", 417 pszDest ? pszDest : addrConnect.ToStringAddrPort(), 418 Ticks<HoursDouble>(pszDest ? 0h : Now<NodeSeconds>() - addrConnect.nTime)); 419 420 // Resolve 421 const uint16_t default_port{pszDest != nullptr ? GetDefaultPort(pszDest) : 422 m_params.GetDefaultPort()}; 423 424 // Collection of addresses to try to connect to: either all dns resolved addresses if a domain name (pszDest) is provided, or addrConnect otherwise. 425 std::vector<CAddress> connect_to{}; 426 if (pszDest) { 427 std::vector<CService> resolved{Lookup(pszDest, default_port, fNameLookup && !HaveNameProxy(), 256)}; 428 if (!resolved.empty()) { 429 std::shuffle(resolved.begin(), resolved.end(), FastRandomContext()); 430 // If the connection is made by name, it can be the case that the name resolves to more than one address. 431 // We don't want to connect any more of them if we are already connected to one 432 for (const auto& r : resolved) { 433 addrConnect = CAddress{MaybeFlipIPv6toCJDNS(r), NODE_NONE}; 434 if (!addrConnect.IsValid()) { 435 LogDebug(BCLog::NET, "Resolver returned invalid address %s for %s\n", addrConnect.ToStringAddrPort(), pszDest); 436 return nullptr; 437 } 438 // It is possible that we already have a connection to the IP/port pszDest resolved to. 439 // In that case, drop the connection that was just created. 440 LOCK(m_nodes_mutex); 441 CNode* pnode = FindNode(static_cast<CService>(addrConnect)); 442 if (pnode) { 443 LogPrintf("Not opening a connection to %s, already connected to %s\n", pszDest, addrConnect.ToStringAddrPort()); 444 return nullptr; 445 } 446 // Add the address to the resolved addresses vector so we can try to connect to it later on 447 connect_to.push_back(addrConnect); 448 } 449 } else { 450 // For resolution via proxy 451 connect_to.push_back(addrConnect); 452 } 453 } else { 454 // Connect via addrConnect directly 455 connect_to.push_back(addrConnect); 456 } 457 458 // Connect 459 std::unique_ptr<Sock> sock; 460 Proxy proxy; 461 CService addr_bind; 462 assert(!addr_bind.IsValid()); 463 std::unique_ptr<i2p::sam::Session> i2p_transient_session; 464 465 for (auto& target_addr: connect_to) { 466 if (target_addr.IsValid()) { 467 const bool use_proxy{GetProxy(target_addr.GetNetwork(), proxy)}; 468 bool proxyConnectionFailed = false; 469 470 if (target_addr.IsI2P() && use_proxy) { 471 i2p::Connection conn; 472 bool connected{false}; 473 474 if (m_i2p_sam_session) { 475 connected = m_i2p_sam_session->Connect(target_addr, conn, proxyConnectionFailed); 476 } else { 477 { 478 LOCK(m_unused_i2p_sessions_mutex); 479 if (m_unused_i2p_sessions.empty()) { 480 i2p_transient_session = 481 std::make_unique<i2p::sam::Session>(proxy, &interruptNet); 482 } else { 483 i2p_transient_session.swap(m_unused_i2p_sessions.front()); 484 m_unused_i2p_sessions.pop(); 485 } 486 } 487 connected = i2p_transient_session->Connect(target_addr, conn, proxyConnectionFailed); 488 if (!connected) { 489 LOCK(m_unused_i2p_sessions_mutex); 490 if (m_unused_i2p_sessions.size() < MAX_UNUSED_I2P_SESSIONS_SIZE) { 491 m_unused_i2p_sessions.emplace(i2p_transient_session.release()); 492 } 493 } 494 } 495 496 if (connected) { 497 sock = std::move(conn.sock); 498 addr_bind = conn.me; 499 } 500 } else if (use_proxy) { 501 LogPrintLevel(BCLog::PROXY, BCLog::Level::Debug, "Using proxy: %s to connect to %s\n", proxy.ToString(), target_addr.ToStringAddrPort()); 502 sock = ConnectThroughProxy(proxy, target_addr.ToStringAddr(), target_addr.GetPort(), proxyConnectionFailed); 503 } else { 504 // no proxy needed (none set for target network) 505 sock = ConnectDirectly(target_addr, conn_type == ConnectionType::MANUAL); 506 } 507 if (!proxyConnectionFailed) { 508 // If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to 509 // the proxy, mark this as an attempt. 510 addrman.Attempt(target_addr, fCountFailure); 511 } 512 } else if (pszDest && GetNameProxy(proxy)) { 513 std::string host; 514 uint16_t port{default_port}; 515 SplitHostPort(std::string(pszDest), port, host); 516 bool proxyConnectionFailed; 517 sock = ConnectThroughProxy(proxy, host, port, proxyConnectionFailed); 518 } 519 // Check any other resolved address (if any) if we fail to connect 520 if (!sock) { 521 continue; 522 } 523 524 NetPermissionFlags permission_flags = NetPermissionFlags::None; 525 std::vector<NetWhitelistPermissions> whitelist_permissions = conn_type == ConnectionType::MANUAL ? vWhitelistedRangeOutgoing : std::vector<NetWhitelistPermissions>{}; 526 AddWhitelistPermissionFlags(permission_flags, target_addr, whitelist_permissions); 527 528 // Add node 529 NodeId id = GetNewNodeId(); 530 uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); 531 if (!addr_bind.IsValid()) { 532 addr_bind = GetBindAddress(*sock); 533 } 534 CNode* pnode = new CNode(id, 535 std::move(sock), 536 target_addr, 537 CalculateKeyedNetGroup(target_addr), 538 nonce, 539 addr_bind, 540 pszDest ? pszDest : "", 541 conn_type, 542 /*inbound_onion=*/false, 543 CNodeOptions{ 544 .permission_flags = permission_flags, 545 .i2p_sam_session = std::move(i2p_transient_session), 546 .recv_flood_size = nReceiveFloodSize, 547 .use_v2transport = use_v2transport, 548 }); 549 pnode->AddRef(); 550 551 // We're making a new connection, harvest entropy from the time (and our peer count) 552 RandAddEvent((uint32_t)id); 553 554 return pnode; 555 } 556 557 return nullptr; 558 } 559 560 void CNode::CloseSocketDisconnect() 561 { 562 fDisconnect = true; 563 LOCK(m_sock_mutex); 564 if (m_sock) { 565 LogDebug(BCLog::NET, "Resetting socket for peer=%d%s", GetId(), LogIP(fLogIPs)); 566 m_sock.reset(); 567 568 TRACEPOINT(net, closed_connection, 569 GetId(), 570 m_addr_name.c_str(), 571 ConnectionTypeAsString().c_str(), 572 ConnectedThroughNetwork(), 573 Ticks<std::chrono::seconds>(m_connected)); 574 } 575 m_i2p_sam_session.reset(); 576 } 577 578 void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector<NetWhitelistPermissions>& ranges) const { 579 for (const auto& subnet : ranges) { 580 if (subnet.m_subnet.Match(addr)) { 581 NetPermissions::AddFlag(flags, subnet.m_flags); 582 } 583 } 584 if (NetPermissions::HasFlag(flags, NetPermissionFlags::Implicit)) { 585 NetPermissions::ClearFlag(flags, NetPermissionFlags::Implicit); 586 if (whitelist_forcerelay) NetPermissions::AddFlag(flags, NetPermissionFlags::ForceRelay); 587 if (whitelist_relay) NetPermissions::AddFlag(flags, NetPermissionFlags::Relay); 588 NetPermissions::AddFlag(flags, NetPermissionFlags::Mempool); 589 NetPermissions::AddFlag(flags, NetPermissionFlags::NoBan); 590 } 591 } 592 593 CService CNode::GetAddrLocal() const 594 { 595 AssertLockNotHeld(m_addr_local_mutex); 596 LOCK(m_addr_local_mutex); 597 return m_addr_local; 598 } 599 600 void CNode::SetAddrLocal(const CService& addrLocalIn) { 601 AssertLockNotHeld(m_addr_local_mutex); 602 LOCK(m_addr_local_mutex); 603 if (Assume(!m_addr_local.IsValid())) { // Addr local can only be set once during version msg processing 604 m_addr_local = addrLocalIn; 605 } 606 } 607 608 Network CNode::ConnectedThroughNetwork() const 609 { 610 return m_inbound_onion ? NET_ONION : addr.GetNetClass(); 611 } 612 613 bool CNode::IsConnectedThroughPrivacyNet() const 614 { 615 return m_inbound_onion || addr.IsPrivacyNet(); 616 } 617 618 #undef X 619 #define X(name) stats.name = name 620 void CNode::CopyStats(CNodeStats& stats) 621 { 622 stats.nodeid = this->GetId(); 623 X(addr); 624 X(addrBind); 625 stats.m_network = ConnectedThroughNetwork(); 626 X(m_last_send); 627 X(m_last_recv); 628 X(m_last_tx_time); 629 X(m_last_block_time); 630 X(m_connected); 631 X(m_addr_name); 632 X(nVersion); 633 { 634 LOCK(m_subver_mutex); 635 X(cleanSubVer); 636 } 637 stats.fInbound = IsInboundConn(); 638 X(m_bip152_highbandwidth_to); 639 X(m_bip152_highbandwidth_from); 640 { 641 LOCK(cs_vSend); 642 X(mapSendBytesPerMsgType); 643 X(nSendBytes); 644 } 645 { 646 LOCK(cs_vRecv); 647 X(mapRecvBytesPerMsgType); 648 X(nRecvBytes); 649 Transport::Info info = m_transport->GetInfo(); 650 stats.m_transport_type = info.transport_type; 651 if (info.session_id) stats.m_session_id = HexStr(*info.session_id); 652 } 653 X(m_permission_flags); 654 655 X(m_last_ping_time); 656 X(m_min_ping_time); 657 658 // Leave string empty if addrLocal invalid (not filled in yet) 659 CService addrLocalUnlocked = GetAddrLocal(); 660 stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToStringAddrPort() : ""; 661 662 X(m_conn_type); 663 } 664 #undef X 665 666 bool CNode::ReceiveMsgBytes(std::span<const uint8_t> msg_bytes, bool& complete) 667 { 668 complete = false; 669 const auto time = GetTime<std::chrono::microseconds>(); 670 LOCK(cs_vRecv); 671 m_last_recv = std::chrono::duration_cast<std::chrono::seconds>(time); 672 nRecvBytes += msg_bytes.size(); 673 while (msg_bytes.size() > 0) { 674 // absorb network data 675 if (!m_transport->ReceivedBytes(msg_bytes)) { 676 // Serious transport problem, disconnect from the peer. 677 return false; 678 } 679 680 if (m_transport->ReceivedMessageComplete()) { 681 // decompose a transport agnostic CNetMessage from the deserializer 682 bool reject_message{false}; 683 CNetMessage msg = m_transport->GetReceivedMessage(time, reject_message); 684 if (reject_message) { 685 // Message deserialization failed. Drop the message but don't disconnect the peer. 686 // store the size of the corrupt message 687 mapRecvBytesPerMsgType.at(NET_MESSAGE_TYPE_OTHER) += msg.m_raw_message_size; 688 continue; 689 } 690 691 // Store received bytes per message type. 692 // To prevent a memory DOS, only allow known message types. 693 auto i = mapRecvBytesPerMsgType.find(msg.m_type); 694 if (i == mapRecvBytesPerMsgType.end()) { 695 i = mapRecvBytesPerMsgType.find(NET_MESSAGE_TYPE_OTHER); 696 } 697 assert(i != mapRecvBytesPerMsgType.end()); 698 i->second += msg.m_raw_message_size; 699 700 // push the message to the process queue, 701 vRecvMsg.push_back(std::move(msg)); 702 703 complete = true; 704 } 705 } 706 707 return true; 708 } 709 710 std::string CNode::LogIP(bool log_ip) const 711 { 712 return log_ip ? strprintf(" peeraddr=%s", addr.ToStringAddrPort()) : ""; 713 } 714 715 std::string CNode::DisconnectMsg(bool log_ip) const 716 { 717 return strprintf("disconnecting peer=%d%s", 718 GetId(), 719 LogIP(log_ip)); 720 } 721 722 V1Transport::V1Transport(const NodeId node_id) noexcept 723 : m_magic_bytes{Params().MessageStart()}, m_node_id{node_id} 724 { 725 LOCK(m_recv_mutex); 726 Reset(); 727 } 728 729 Transport::Info V1Transport::GetInfo() const noexcept 730 { 731 return {.transport_type = TransportProtocolType::V1, .session_id = {}}; 732 } 733 734 int V1Transport::readHeader(std::span<const uint8_t> msg_bytes) 735 { 736 AssertLockHeld(m_recv_mutex); 737 // copy data to temporary parsing buffer 738 unsigned int nRemaining = CMessageHeader::HEADER_SIZE - nHdrPos; 739 unsigned int nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size()); 740 741 memcpy(&hdrbuf[nHdrPos], msg_bytes.data(), nCopy); 742 nHdrPos += nCopy; 743 744 // if header incomplete, exit 745 if (nHdrPos < CMessageHeader::HEADER_SIZE) 746 return nCopy; 747 748 // deserialize to CMessageHeader 749 try { 750 hdrbuf >> hdr; 751 } 752 catch (const std::exception&) { 753 LogDebug(BCLog::NET, "Header error: Unable to deserialize, peer=%d\n", m_node_id); 754 return -1; 755 } 756 757 // Check start string, network magic 758 if (hdr.pchMessageStart != m_magic_bytes) { 759 LogDebug(BCLog::NET, "Header error: Wrong MessageStart %s received, peer=%d\n", HexStr(hdr.pchMessageStart), m_node_id); 760 return -1; 761 } 762 763 // reject messages larger than MAX_SIZE or MAX_PROTOCOL_MESSAGE_LENGTH 764 // NOTE: failing to perform this check previously allowed a malicious peer to make us allocate 32MiB of memory per 765 // connection. See https://bitcoincore.org/en/2024/07/03/disclose_receive_buffer_oom. 766 if (hdr.nMessageSize > MAX_SIZE || hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) { 767 LogDebug(BCLog::NET, "Header error: Size too large (%s, %u bytes), peer=%d\n", SanitizeString(hdr.GetMessageType()), hdr.nMessageSize, m_node_id); 768 return -1; 769 } 770 771 // switch state to reading message data 772 in_data = true; 773 774 return nCopy; 775 } 776 777 int V1Transport::readData(std::span<const uint8_t> msg_bytes) 778 { 779 AssertLockHeld(m_recv_mutex); 780 unsigned int nRemaining = hdr.nMessageSize - nDataPos; 781 unsigned int nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size()); 782 783 if (vRecv.size() < nDataPos + nCopy) { 784 // Allocate up to 256 KiB ahead, but never more than the total message size. 785 vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); 786 } 787 788 hasher.Write(msg_bytes.first(nCopy)); 789 memcpy(&vRecv[nDataPos], msg_bytes.data(), nCopy); 790 nDataPos += nCopy; 791 792 return nCopy; 793 } 794 795 const uint256& V1Transport::GetMessageHash() const 796 { 797 AssertLockHeld(m_recv_mutex); 798 assert(CompleteInternal()); 799 if (data_hash.IsNull()) 800 hasher.Finalize(data_hash); 801 return data_hash; 802 } 803 804 CNetMessage V1Transport::GetReceivedMessage(const std::chrono::microseconds time, bool& reject_message) 805 { 806 AssertLockNotHeld(m_recv_mutex); 807 // Initialize out parameter 808 reject_message = false; 809 // decompose a single CNetMessage from the TransportDeserializer 810 LOCK(m_recv_mutex); 811 CNetMessage msg(std::move(vRecv)); 812 813 // store message type string, time, and sizes 814 msg.m_type = hdr.GetMessageType(); 815 msg.m_time = time; 816 msg.m_message_size = hdr.nMessageSize; 817 msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE; 818 819 uint256 hash = GetMessageHash(); 820 821 // We just received a message off the wire, harvest entropy from the time (and the message checksum) 822 RandAddEvent(ReadLE32(hash.begin())); 823 824 // Check checksum and header message type string 825 if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { 826 LogDebug(BCLog::NET, "Header error: Wrong checksum (%s, %u bytes), expected %s was %s, peer=%d\n", 827 SanitizeString(msg.m_type), msg.m_message_size, 828 HexStr(std::span{hash}.first(CMessageHeader::CHECKSUM_SIZE)), 829 HexStr(hdr.pchChecksum), 830 m_node_id); 831 reject_message = true; 832 } else if (!hdr.IsMessageTypeValid()) { 833 LogDebug(BCLog::NET, "Header error: Invalid message type (%s, %u bytes), peer=%d\n", 834 SanitizeString(hdr.GetMessageType()), msg.m_message_size, m_node_id); 835 reject_message = true; 836 } 837 838 // Always reset the network deserializer (prepare for the next message) 839 Reset(); 840 return msg; 841 } 842 843 bool V1Transport::SetMessageToSend(CSerializedNetMsg& msg) noexcept 844 { 845 AssertLockNotHeld(m_send_mutex); 846 // Determine whether a new message can be set. 847 LOCK(m_send_mutex); 848 if (m_sending_header || m_bytes_sent < m_message_to_send.data.size()) return false; 849 850 // create dbl-sha256 checksum 851 uint256 hash = Hash(msg.data); 852 853 // create header 854 CMessageHeader hdr(m_magic_bytes, msg.m_type.c_str(), msg.data.size()); 855 memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE); 856 857 // serialize header 858 m_header_to_send.clear(); 859 VectorWriter{m_header_to_send, 0, hdr}; 860 861 // update state 862 m_message_to_send = std::move(msg); 863 m_sending_header = true; 864 m_bytes_sent = 0; 865 return true; 866 } 867 868 Transport::BytesToSend V1Transport::GetBytesToSend(bool have_next_message) const noexcept 869 { 870 AssertLockNotHeld(m_send_mutex); 871 LOCK(m_send_mutex); 872 if (m_sending_header) { 873 return {std::span{m_header_to_send}.subspan(m_bytes_sent), 874 // We have more to send after the header if the message has payload, or if there 875 // is a next message after that. 876 have_next_message || !m_message_to_send.data.empty(), 877 m_message_to_send.m_type 878 }; 879 } else { 880 return {std::span{m_message_to_send.data}.subspan(m_bytes_sent), 881 // We only have more to send after this message's payload if there is another 882 // message. 883 have_next_message, 884 m_message_to_send.m_type 885 }; 886 } 887 } 888 889 void V1Transport::MarkBytesSent(size_t bytes_sent) noexcept 890 { 891 AssertLockNotHeld(m_send_mutex); 892 LOCK(m_send_mutex); 893 m_bytes_sent += bytes_sent; 894 if (m_sending_header && m_bytes_sent == m_header_to_send.size()) { 895 // We're done sending a message's header. Switch to sending its data bytes. 896 m_sending_header = false; 897 m_bytes_sent = 0; 898 } else if (!m_sending_header && m_bytes_sent == m_message_to_send.data.size()) { 899 // We're done sending a message's data. Wipe the data vector to reduce memory consumption. 900 ClearShrink(m_message_to_send.data); 901 m_bytes_sent = 0; 902 } 903 } 904 905 size_t V1Transport::GetSendMemoryUsage() const noexcept 906 { 907 AssertLockNotHeld(m_send_mutex); 908 LOCK(m_send_mutex); 909 // Don't count sending-side fields besides m_message_to_send, as they're all small and bounded. 910 return m_message_to_send.GetMemoryUsage(); 911 } 912 913 namespace { 914 915 /** List of short messages as defined in BIP324, in order. 916 * 917 * Only message types that are actually implemented in this codebase need to be listed, as other 918 * messages get ignored anyway - whether we know how to decode them or not. 919 */ 920 const std::array<std::string, 33> V2_MESSAGE_IDS = { 921 "", // 12 bytes follow encoding the message type like in V1 922 NetMsgType::ADDR, 923 NetMsgType::BLOCK, 924 NetMsgType::BLOCKTXN, 925 NetMsgType::CMPCTBLOCK, 926 NetMsgType::FEEFILTER, 927 NetMsgType::FILTERADD, 928 NetMsgType::FILTERCLEAR, 929 NetMsgType::FILTERLOAD, 930 NetMsgType::GETBLOCKS, 931 NetMsgType::GETBLOCKTXN, 932 NetMsgType::GETDATA, 933 NetMsgType::GETHEADERS, 934 NetMsgType::HEADERS, 935 NetMsgType::INV, 936 NetMsgType::MEMPOOL, 937 NetMsgType::MERKLEBLOCK, 938 NetMsgType::NOTFOUND, 939 NetMsgType::PING, 940 NetMsgType::PONG, 941 NetMsgType::SENDCMPCT, 942 NetMsgType::TX, 943 NetMsgType::GETCFILTERS, 944 NetMsgType::CFILTER, 945 NetMsgType::GETCFHEADERS, 946 NetMsgType::CFHEADERS, 947 NetMsgType::GETCFCHECKPT, 948 NetMsgType::CFCHECKPT, 949 NetMsgType::ADDRV2, 950 // Unimplemented message types that are assigned in BIP324: 951 "", 952 "", 953 "", 954 "" 955 }; 956 957 class V2MessageMap 958 { 959 std::unordered_map<std::string, uint8_t> m_map; 960 961 public: 962 V2MessageMap() noexcept 963 { 964 for (size_t i = 1; i < std::size(V2_MESSAGE_IDS); ++i) { 965 m_map.emplace(V2_MESSAGE_IDS[i], i); 966 } 967 } 968 969 std::optional<uint8_t> operator()(const std::string& message_name) const noexcept 970 { 971 auto it = m_map.find(message_name); 972 if (it == m_map.end()) return std::nullopt; 973 return it->second; 974 } 975 }; 976 977 const V2MessageMap V2_MESSAGE_MAP; 978 979 std::vector<uint8_t> GenerateRandomGarbage() noexcept 980 { 981 std::vector<uint8_t> ret; 982 FastRandomContext rng; 983 ret.resize(rng.randrange(V2Transport::MAX_GARBAGE_LEN + 1)); 984 rng.fillrand(MakeWritableByteSpan(ret)); 985 return ret; 986 } 987 988 } // namespace 989 990 void V2Transport::StartSendingHandshake() noexcept 991 { 992 AssertLockHeld(m_send_mutex); 993 Assume(m_send_state == SendState::AWAITING_KEY); 994 Assume(m_send_buffer.empty()); 995 // Initialize the send buffer with ellswift pubkey + provided garbage. 996 m_send_buffer.resize(EllSwiftPubKey::size() + m_send_garbage.size()); 997 std::copy(std::begin(m_cipher.GetOurPubKey()), std::end(m_cipher.GetOurPubKey()), MakeWritableByteSpan(m_send_buffer).begin()); 998 std::copy(m_send_garbage.begin(), m_send_garbage.end(), m_send_buffer.begin() + EllSwiftPubKey::size()); 999 // We cannot wipe m_send_garbage as it will still be used as AAD later in the handshake. 1000 } 1001 1002 V2Transport::V2Transport(NodeId nodeid, bool initiating, const CKey& key, std::span<const std::byte> ent32, std::vector<uint8_t> garbage) noexcept 1003 : m_cipher{key, ent32}, m_initiating{initiating}, m_nodeid{nodeid}, 1004 m_v1_fallback{nodeid}, 1005 m_recv_state{initiating ? RecvState::KEY : RecvState::KEY_MAYBE_V1}, 1006 m_send_garbage{std::move(garbage)}, 1007 m_send_state{initiating ? SendState::AWAITING_KEY : SendState::MAYBE_V1} 1008 { 1009 Assume(m_send_garbage.size() <= MAX_GARBAGE_LEN); 1010 // Start sending immediately if we're the initiator of the connection. 1011 if (initiating) { 1012 LOCK(m_send_mutex); 1013 StartSendingHandshake(); 1014 } 1015 } 1016 1017 V2Transport::V2Transport(NodeId nodeid, bool initiating) noexcept 1018 : V2Transport{nodeid, initiating, GenerateRandomKey(), 1019 MakeByteSpan(GetRandHash()), GenerateRandomGarbage()} {} 1020 1021 void V2Transport::SetReceiveState(RecvState recv_state) noexcept 1022 { 1023 AssertLockHeld(m_recv_mutex); 1024 // Enforce allowed state transitions. 1025 switch (m_recv_state) { 1026 case RecvState::KEY_MAYBE_V1: 1027 Assume(recv_state == RecvState::KEY || recv_state == RecvState::V1); 1028 break; 1029 case RecvState::KEY: 1030 Assume(recv_state == RecvState::GARB_GARBTERM); 1031 break; 1032 case RecvState::GARB_GARBTERM: 1033 Assume(recv_state == RecvState::VERSION); 1034 break; 1035 case RecvState::VERSION: 1036 Assume(recv_state == RecvState::APP); 1037 break; 1038 case RecvState::APP: 1039 Assume(recv_state == RecvState::APP_READY); 1040 break; 1041 case RecvState::APP_READY: 1042 Assume(recv_state == RecvState::APP); 1043 break; 1044 case RecvState::V1: 1045 Assume(false); // V1 state cannot be left 1046 break; 1047 } 1048 // Change state. 1049 m_recv_state = recv_state; 1050 } 1051 1052 void V2Transport::SetSendState(SendState send_state) noexcept 1053 { 1054 AssertLockHeld(m_send_mutex); 1055 // Enforce allowed state transitions. 1056 switch (m_send_state) { 1057 case SendState::MAYBE_V1: 1058 Assume(send_state == SendState::V1 || send_state == SendState::AWAITING_KEY); 1059 break; 1060 case SendState::AWAITING_KEY: 1061 Assume(send_state == SendState::READY); 1062 break; 1063 case SendState::READY: 1064 case SendState::V1: 1065 Assume(false); // Final states 1066 break; 1067 } 1068 // Change state. 1069 m_send_state = send_state; 1070 } 1071 1072 bool V2Transport::ReceivedMessageComplete() const noexcept 1073 { 1074 AssertLockNotHeld(m_recv_mutex); 1075 LOCK(m_recv_mutex); 1076 if (m_recv_state == RecvState::V1) return m_v1_fallback.ReceivedMessageComplete(); 1077 1078 return m_recv_state == RecvState::APP_READY; 1079 } 1080 1081 void V2Transport::ProcessReceivedMaybeV1Bytes() noexcept 1082 { 1083 AssertLockHeld(m_recv_mutex); 1084 AssertLockNotHeld(m_send_mutex); 1085 Assume(m_recv_state == RecvState::KEY_MAYBE_V1); 1086 // We still have to determine if this is a v1 or v2 connection. The bytes being received could 1087 // be the beginning of either a v1 packet (network magic + "version\x00\x00\x00\x00\x00"), or 1088 // of a v2 public key. BIP324 specifies that a mismatch with this 16-byte string should trigger 1089 // sending of the key. 1090 std::array<uint8_t, V1_PREFIX_LEN> v1_prefix = {0, 0, 0, 0, 'v', 'e', 'r', 's', 'i', 'o', 'n', 0, 0, 0, 0, 0}; 1091 std::copy(std::begin(Params().MessageStart()), std::end(Params().MessageStart()), v1_prefix.begin()); 1092 Assume(m_recv_buffer.size() <= v1_prefix.size()); 1093 if (!std::equal(m_recv_buffer.begin(), m_recv_buffer.end(), v1_prefix.begin())) { 1094 // Mismatch with v1 prefix, so we can assume a v2 connection. 1095 SetReceiveState(RecvState::KEY); // Convert to KEY state, leaving received bytes around. 1096 // Transition the sender to AWAITING_KEY state and start sending. 1097 LOCK(m_send_mutex); 1098 SetSendState(SendState::AWAITING_KEY); 1099 StartSendingHandshake(); 1100 } else if (m_recv_buffer.size() == v1_prefix.size()) { 1101 // Full match with the v1 prefix, so fall back to v1 behavior. 1102 LOCK(m_send_mutex); 1103 std::span<const uint8_t> feedback{m_recv_buffer}; 1104 // Feed already received bytes to v1 transport. It should always accept these, because it's 1105 // less than the size of a v1 header, and these are the first bytes fed to m_v1_fallback. 1106 bool ret = m_v1_fallback.ReceivedBytes(feedback); 1107 Assume(feedback.empty()); 1108 Assume(ret); 1109 SetReceiveState(RecvState::V1); 1110 SetSendState(SendState::V1); 1111 // Reset v2 transport buffers to save memory. 1112 ClearShrink(m_recv_buffer); 1113 ClearShrink(m_send_buffer); 1114 } else { 1115 // We have not received enough to distinguish v1 from v2 yet. Wait until more bytes come. 1116 } 1117 } 1118 1119 bool V2Transport::ProcessReceivedKeyBytes() noexcept 1120 { 1121 AssertLockHeld(m_recv_mutex); 1122 AssertLockNotHeld(m_send_mutex); 1123 Assume(m_recv_state == RecvState::KEY); 1124 Assume(m_recv_buffer.size() <= EllSwiftPubKey::size()); 1125 1126 // As a special exception, if bytes 4-16 of the key on a responder connection match the 1127 // corresponding bytes of a V1 version message, but bytes 0-4 don't match the network magic 1128 // (if they did, we'd have switched to V1 state already), assume this is a peer from 1129 // another network, and disconnect them. They will almost certainly disconnect us too when 1130 // they receive our uniformly random key and garbage, but detecting this case specially 1131 // means we can log it. 1132 static constexpr std::array<uint8_t, 12> MATCH = {'v', 'e', 'r', 's', 'i', 'o', 'n', 0, 0, 0, 0, 0}; 1133 static constexpr size_t OFFSET = std::tuple_size_v<MessageStartChars>; 1134 if (!m_initiating && m_recv_buffer.size() >= OFFSET + MATCH.size()) { 1135 if (std::equal(MATCH.begin(), MATCH.end(), m_recv_buffer.begin() + OFFSET)) { 1136 LogDebug(BCLog::NET, "V2 transport error: V1 peer with wrong MessageStart %s\n", 1137 HexStr(std::span(m_recv_buffer).first(OFFSET))); 1138 return false; 1139 } 1140 } 1141 1142 if (m_recv_buffer.size() == EllSwiftPubKey::size()) { 1143 // Other side's key has been fully received, and can now be Diffie-Hellman combined with 1144 // our key to initialize the encryption ciphers. 1145 1146 // Initialize the ciphers. 1147 EllSwiftPubKey ellswift(MakeByteSpan(m_recv_buffer)); 1148 LOCK(m_send_mutex); 1149 m_cipher.Initialize(ellswift, m_initiating); 1150 1151 // Switch receiver state to GARB_GARBTERM. 1152 SetReceiveState(RecvState::GARB_GARBTERM); 1153 m_recv_buffer.clear(); 1154 1155 // Switch sender state to READY. 1156 SetSendState(SendState::READY); 1157 1158 // Append the garbage terminator to the send buffer. 1159 m_send_buffer.resize(m_send_buffer.size() + BIP324Cipher::GARBAGE_TERMINATOR_LEN); 1160 std::copy(m_cipher.GetSendGarbageTerminator().begin(), 1161 m_cipher.GetSendGarbageTerminator().end(), 1162 MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::GARBAGE_TERMINATOR_LEN).begin()); 1163 1164 // Construct version packet in the send buffer, with the sent garbage data as AAD. 1165 m_send_buffer.resize(m_send_buffer.size() + BIP324Cipher::EXPANSION + VERSION_CONTENTS.size()); 1166 m_cipher.Encrypt( 1167 /*contents=*/VERSION_CONTENTS, 1168 /*aad=*/MakeByteSpan(m_send_garbage), 1169 /*ignore=*/false, 1170 /*output=*/MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::EXPANSION + VERSION_CONTENTS.size())); 1171 // We no longer need the garbage. 1172 ClearShrink(m_send_garbage); 1173 } else { 1174 // We still have to receive more key bytes. 1175 } 1176 return true; 1177 } 1178 1179 bool V2Transport::ProcessReceivedGarbageBytes() noexcept 1180 { 1181 AssertLockHeld(m_recv_mutex); 1182 Assume(m_recv_state == RecvState::GARB_GARBTERM); 1183 Assume(m_recv_buffer.size() <= MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN); 1184 if (m_recv_buffer.size() >= BIP324Cipher::GARBAGE_TERMINATOR_LEN) { 1185 if (std::ranges::equal(MakeByteSpan(m_recv_buffer).last(BIP324Cipher::GARBAGE_TERMINATOR_LEN), m_cipher.GetReceiveGarbageTerminator())) { 1186 // Garbage terminator received. Store garbage to authenticate it as AAD later. 1187 m_recv_aad = std::move(m_recv_buffer); 1188 m_recv_aad.resize(m_recv_aad.size() - BIP324Cipher::GARBAGE_TERMINATOR_LEN); 1189 m_recv_buffer.clear(); 1190 SetReceiveState(RecvState::VERSION); 1191 } else if (m_recv_buffer.size() == MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN) { 1192 // We've reached the maximum length for garbage + garbage terminator, and the 1193 // terminator still does not match. Abort. 1194 LogDebug(BCLog::NET, "V2 transport error: missing garbage terminator, peer=%d\n", m_nodeid); 1195 return false; 1196 } else { 1197 // We still need to receive more garbage and/or garbage terminator bytes. 1198 } 1199 } else { 1200 // We have less than GARBAGE_TERMINATOR_LEN (16) bytes, so we certainly need to receive 1201 // more first. 1202 } 1203 return true; 1204 } 1205 1206 bool V2Transport::ProcessReceivedPacketBytes() noexcept 1207 { 1208 AssertLockHeld(m_recv_mutex); 1209 Assume(m_recv_state == RecvState::VERSION || m_recv_state == RecvState::APP); 1210 1211 // The maximum permitted contents length for a packet, consisting of: 1212 // - 0x00 byte: indicating long message type encoding 1213 // - 12 bytes of message type 1214 // - payload 1215 static constexpr size_t MAX_CONTENTS_LEN = 1216 1 + CMessageHeader::MESSAGE_TYPE_SIZE + 1217 std::min<size_t>(MAX_SIZE, MAX_PROTOCOL_MESSAGE_LENGTH); 1218 1219 if (m_recv_buffer.size() == BIP324Cipher::LENGTH_LEN) { 1220 // Length descriptor received. 1221 m_recv_len = m_cipher.DecryptLength(MakeByteSpan(m_recv_buffer)); 1222 if (m_recv_len > MAX_CONTENTS_LEN) { 1223 LogDebug(BCLog::NET, "V2 transport error: packet too large (%u bytes), peer=%d\n", m_recv_len, m_nodeid); 1224 return false; 1225 } 1226 } else if (m_recv_buffer.size() > BIP324Cipher::LENGTH_LEN && m_recv_buffer.size() == m_recv_len + BIP324Cipher::EXPANSION) { 1227 // Ciphertext received, decrypt it into m_recv_decode_buffer. 1228 // Note that it is impossible to reach this branch without hitting the branch above first, 1229 // as GetMaxBytesToProcess only allows up to LENGTH_LEN into the buffer before that point. 1230 m_recv_decode_buffer.resize(m_recv_len); 1231 bool ignore{false}; 1232 bool ret = m_cipher.Decrypt( 1233 /*input=*/MakeByteSpan(m_recv_buffer).subspan(BIP324Cipher::LENGTH_LEN), 1234 /*aad=*/MakeByteSpan(m_recv_aad), 1235 /*ignore=*/ignore, 1236 /*contents=*/MakeWritableByteSpan(m_recv_decode_buffer)); 1237 if (!ret) { 1238 LogDebug(BCLog::NET, "V2 transport error: packet decryption failure (%u bytes), peer=%d\n", m_recv_len, m_nodeid); 1239 return false; 1240 } 1241 // We have decrypted a valid packet with the AAD we expected, so clear the expected AAD. 1242 ClearShrink(m_recv_aad); 1243 // Feed the last 4 bytes of the Poly1305 authentication tag (and its timing) into our RNG. 1244 RandAddEvent(ReadLE32(m_recv_buffer.data() + m_recv_buffer.size() - 4)); 1245 1246 // At this point we have a valid packet decrypted into m_recv_decode_buffer. If it's not a 1247 // decoy, which we simply ignore, use the current state to decide what to do with it. 1248 if (!ignore) { 1249 switch (m_recv_state) { 1250 case RecvState::VERSION: 1251 // Version message received; transition to application phase. The contents is 1252 // ignored, but can be used for future extensions. 1253 SetReceiveState(RecvState::APP); 1254 break; 1255 case RecvState::APP: 1256 // Application message decrypted correctly. It can be extracted using GetMessage(). 1257 SetReceiveState(RecvState::APP_READY); 1258 break; 1259 default: 1260 // Any other state is invalid (this function should not have been called). 1261 Assume(false); 1262 } 1263 } 1264 // Wipe the receive buffer where the next packet will be received into. 1265 ClearShrink(m_recv_buffer); 1266 // In all but APP_READY state, we can wipe the decoded contents. 1267 if (m_recv_state != RecvState::APP_READY) ClearShrink(m_recv_decode_buffer); 1268 } else { 1269 // We either have less than 3 bytes, so we don't know the packet's length yet, or more 1270 // than 3 bytes but less than the packet's full ciphertext. Wait until those arrive. 1271 } 1272 return true; 1273 } 1274 1275 size_t V2Transport::GetMaxBytesToProcess() noexcept 1276 { 1277 AssertLockHeld(m_recv_mutex); 1278 switch (m_recv_state) { 1279 case RecvState::KEY_MAYBE_V1: 1280 // During the KEY_MAYBE_V1 state we do not allow more than the length of v1 prefix into the 1281 // receive buffer. 1282 Assume(m_recv_buffer.size() <= V1_PREFIX_LEN); 1283 // As long as we're not sure if this is a v1 or v2 connection, don't receive more than what 1284 // is strictly necessary to distinguish the two (16 bytes). If we permitted more than 1285 // the v1 header size (24 bytes), we may not be able to feed the already-received bytes 1286 // back into the m_v1_fallback V1 transport. 1287 return V1_PREFIX_LEN - m_recv_buffer.size(); 1288 case RecvState::KEY: 1289 // During the KEY state, we only allow the 64-byte key into the receive buffer. 1290 Assume(m_recv_buffer.size() <= EllSwiftPubKey::size()); 1291 // As long as we have not received the other side's public key, don't receive more than 1292 // that (64 bytes), as garbage follows, and locating the garbage terminator requires the 1293 // key exchange first. 1294 return EllSwiftPubKey::size() - m_recv_buffer.size(); 1295 case RecvState::GARB_GARBTERM: 1296 // Process garbage bytes one by one (because terminator may appear anywhere). 1297 return 1; 1298 case RecvState::VERSION: 1299 case RecvState::APP: 1300 // These three states all involve decoding a packet. Process the length descriptor first, 1301 // so that we know where the current packet ends (and we don't process bytes from the next 1302 // packet or decoy yet). Then, process the ciphertext bytes of the current packet. 1303 if (m_recv_buffer.size() < BIP324Cipher::LENGTH_LEN) { 1304 return BIP324Cipher::LENGTH_LEN - m_recv_buffer.size(); 1305 } else { 1306 // Note that BIP324Cipher::EXPANSION is the total difference between contents size 1307 // and encoded packet size, which includes the 3 bytes due to the packet length. 1308 // When transitioning from receiving the packet length to receiving its ciphertext, 1309 // the encrypted packet length is left in the receive buffer. 1310 return BIP324Cipher::EXPANSION + m_recv_len - m_recv_buffer.size(); 1311 } 1312 case RecvState::APP_READY: 1313 // No bytes can be processed until GetMessage() is called. 1314 return 0; 1315 case RecvState::V1: 1316 // Not allowed (must be dealt with by the caller). 1317 Assume(false); 1318 return 0; 1319 } 1320 Assume(false); // unreachable 1321 return 0; 1322 } 1323 1324 bool V2Transport::ReceivedBytes(std::span<const uint8_t>& msg_bytes) noexcept 1325 { 1326 AssertLockNotHeld(m_recv_mutex); 1327 /** How many bytes to allocate in the receive buffer at most above what is received so far. */ 1328 static constexpr size_t MAX_RESERVE_AHEAD = 256 * 1024; 1329 1330 LOCK(m_recv_mutex); 1331 if (m_recv_state == RecvState::V1) return m_v1_fallback.ReceivedBytes(msg_bytes); 1332 1333 // Process the provided bytes in msg_bytes in a loop. In each iteration a nonzero number of 1334 // bytes (decided by GetMaxBytesToProcess) are taken from the beginning om msg_bytes, and 1335 // appended to m_recv_buffer. Then, depending on the receiver state, one of the 1336 // ProcessReceived*Bytes functions is called to process the bytes in that buffer. 1337 while (!msg_bytes.empty()) { 1338 // Decide how many bytes to copy from msg_bytes to m_recv_buffer. 1339 size_t max_read = GetMaxBytesToProcess(); 1340 1341 // Reserve space in the buffer if there is not enough. 1342 if (m_recv_buffer.size() + std::min(msg_bytes.size(), max_read) > m_recv_buffer.capacity()) { 1343 switch (m_recv_state) { 1344 case RecvState::KEY_MAYBE_V1: 1345 case RecvState::KEY: 1346 case RecvState::GARB_GARBTERM: 1347 // During the initial states (key/garbage), allocate once to fit the maximum (4111 1348 // bytes). 1349 m_recv_buffer.reserve(MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN); 1350 break; 1351 case RecvState::VERSION: 1352 case RecvState::APP: { 1353 // During states where a packet is being received, as much as is expected but never 1354 // more than MAX_RESERVE_AHEAD bytes in addition to what is received so far. 1355 // This means attackers that want to cause us to waste allocated memory are limited 1356 // to MAX_RESERVE_AHEAD above the largest allowed message contents size, and to 1357 // MAX_RESERVE_AHEAD more than they've actually sent us. 1358 size_t alloc_add = std::min(max_read, msg_bytes.size() + MAX_RESERVE_AHEAD); 1359 m_recv_buffer.reserve(m_recv_buffer.size() + alloc_add); 1360 break; 1361 } 1362 case RecvState::APP_READY: 1363 // The buffer is empty in this state. 1364 Assume(m_recv_buffer.empty()); 1365 break; 1366 case RecvState::V1: 1367 // Should have bailed out above. 1368 Assume(false); 1369 break; 1370 } 1371 } 1372 1373 // Can't read more than provided input. 1374 max_read = std::min(msg_bytes.size(), max_read); 1375 // Copy data to buffer. 1376 m_recv_buffer.insert(m_recv_buffer.end(), UCharCast(msg_bytes.data()), UCharCast(msg_bytes.data() + max_read)); 1377 msg_bytes = msg_bytes.subspan(max_read); 1378 1379 // Process data in the buffer. 1380 switch (m_recv_state) { 1381 case RecvState::KEY_MAYBE_V1: 1382 ProcessReceivedMaybeV1Bytes(); 1383 if (m_recv_state == RecvState::V1) return true; 1384 break; 1385 1386 case RecvState::KEY: 1387 if (!ProcessReceivedKeyBytes()) return false; 1388 break; 1389 1390 case RecvState::GARB_GARBTERM: 1391 if (!ProcessReceivedGarbageBytes()) return false; 1392 break; 1393 1394 case RecvState::VERSION: 1395 case RecvState::APP: 1396 if (!ProcessReceivedPacketBytes()) return false; 1397 break; 1398 1399 case RecvState::APP_READY: 1400 return true; 1401 1402 case RecvState::V1: 1403 // We should have bailed out before. 1404 Assume(false); 1405 break; 1406 } 1407 // Make sure we have made progress before continuing. 1408 Assume(max_read > 0); 1409 } 1410 1411 return true; 1412 } 1413 1414 std::optional<std::string> V2Transport::GetMessageType(std::span<const uint8_t>& contents) noexcept 1415 { 1416 if (contents.size() == 0) return std::nullopt; // Empty contents 1417 uint8_t first_byte = contents[0]; 1418 contents = contents.subspan(1); // Strip first byte. 1419 1420 if (first_byte != 0) { 1421 // Short (1 byte) encoding. 1422 if (first_byte < std::size(V2_MESSAGE_IDS)) { 1423 // Valid short message id. 1424 return V2_MESSAGE_IDS[first_byte]; 1425 } else { 1426 // Unknown short message id. 1427 return std::nullopt; 1428 } 1429 } 1430 1431 if (contents.size() < CMessageHeader::MESSAGE_TYPE_SIZE) { 1432 return std::nullopt; // Long encoding needs 12 message type bytes. 1433 } 1434 1435 size_t msg_type_len{0}; 1436 while (msg_type_len < CMessageHeader::MESSAGE_TYPE_SIZE && contents[msg_type_len] != 0) { 1437 // Verify that message type bytes before the first 0x00 are in range. 1438 if (contents[msg_type_len] < ' ' || contents[msg_type_len] > 0x7F) { 1439 return {}; 1440 } 1441 ++msg_type_len; 1442 } 1443 std::string ret{reinterpret_cast<const char*>(contents.data()), msg_type_len}; 1444 while (msg_type_len < CMessageHeader::MESSAGE_TYPE_SIZE) { 1445 // Verify that message type bytes after the first 0x00 are also 0x00. 1446 if (contents[msg_type_len] != 0) return {}; 1447 ++msg_type_len; 1448 } 1449 // Strip message type bytes of contents. 1450 contents = contents.subspan(CMessageHeader::MESSAGE_TYPE_SIZE); 1451 return ret; 1452 } 1453 1454 CNetMessage V2Transport::GetReceivedMessage(std::chrono::microseconds time, bool& reject_message) noexcept 1455 { 1456 AssertLockNotHeld(m_recv_mutex); 1457 LOCK(m_recv_mutex); 1458 if (m_recv_state == RecvState::V1) return m_v1_fallback.GetReceivedMessage(time, reject_message); 1459 1460 Assume(m_recv_state == RecvState::APP_READY); 1461 std::span<const uint8_t> contents{m_recv_decode_buffer}; 1462 auto msg_type = GetMessageType(contents); 1463 CNetMessage msg{DataStream{}}; 1464 // Note that BIP324Cipher::EXPANSION also includes the length descriptor size. 1465 msg.m_raw_message_size = m_recv_decode_buffer.size() + BIP324Cipher::EXPANSION; 1466 if (msg_type) { 1467 reject_message = false; 1468 msg.m_type = std::move(*msg_type); 1469 msg.m_time = time; 1470 msg.m_message_size = contents.size(); 1471 msg.m_recv.resize(contents.size()); 1472 std::copy(contents.begin(), contents.end(), UCharCast(msg.m_recv.data())); 1473 } else { 1474 LogDebug(BCLog::NET, "V2 transport error: invalid message type (%u bytes contents), peer=%d\n", m_recv_decode_buffer.size(), m_nodeid); 1475 reject_message = true; 1476 } 1477 ClearShrink(m_recv_decode_buffer); 1478 SetReceiveState(RecvState::APP); 1479 1480 return msg; 1481 } 1482 1483 bool V2Transport::SetMessageToSend(CSerializedNetMsg& msg) noexcept 1484 { 1485 AssertLockNotHeld(m_send_mutex); 1486 LOCK(m_send_mutex); 1487 if (m_send_state == SendState::V1) return m_v1_fallback.SetMessageToSend(msg); 1488 // We only allow adding a new message to be sent when in the READY state (so the packet cipher 1489 // is available) and the send buffer is empty. This limits the number of messages in the send 1490 // buffer to just one, and leaves the responsibility for queueing them up to the caller. 1491 if (!(m_send_state == SendState::READY && m_send_buffer.empty())) return false; 1492 // Construct contents (encoding message type + payload). 1493 std::vector<uint8_t> contents; 1494 auto short_message_id = V2_MESSAGE_MAP(msg.m_type); 1495 if (short_message_id) { 1496 contents.resize(1 + msg.data.size()); 1497 contents[0] = *short_message_id; 1498 std::copy(msg.data.begin(), msg.data.end(), contents.begin() + 1); 1499 } else { 1500 // Initialize with zeroes, and then write the message type string starting at offset 1. 1501 // This means contents[0] and the unused positions in contents[1..13] remain 0x00. 1502 contents.resize(1 + CMessageHeader::MESSAGE_TYPE_SIZE + msg.data.size(), 0); 1503 std::copy(msg.m_type.begin(), msg.m_type.end(), contents.data() + 1); 1504 std::copy(msg.data.begin(), msg.data.end(), contents.begin() + 1 + CMessageHeader::MESSAGE_TYPE_SIZE); 1505 } 1506 // Construct ciphertext in send buffer. 1507 m_send_buffer.resize(contents.size() + BIP324Cipher::EXPANSION); 1508 m_cipher.Encrypt(MakeByteSpan(contents), {}, false, MakeWritableByteSpan(m_send_buffer)); 1509 m_send_type = msg.m_type; 1510 // Release memory 1511 ClearShrink(msg.data); 1512 return true; 1513 } 1514 1515 Transport::BytesToSend V2Transport::GetBytesToSend(bool have_next_message) const noexcept 1516 { 1517 AssertLockNotHeld(m_send_mutex); 1518 LOCK(m_send_mutex); 1519 if (m_send_state == SendState::V1) return m_v1_fallback.GetBytesToSend(have_next_message); 1520 1521 if (m_send_state == SendState::MAYBE_V1) Assume(m_send_buffer.empty()); 1522 Assume(m_send_pos <= m_send_buffer.size()); 1523 return { 1524 std::span{m_send_buffer}.subspan(m_send_pos), 1525 // We only have more to send after the current m_send_buffer if there is a (next) 1526 // message to be sent, and we're capable of sending packets. */ 1527 have_next_message && m_send_state == SendState::READY, 1528 m_send_type 1529 }; 1530 } 1531 1532 void V2Transport::MarkBytesSent(size_t bytes_sent) noexcept 1533 { 1534 AssertLockNotHeld(m_send_mutex); 1535 LOCK(m_send_mutex); 1536 if (m_send_state == SendState::V1) return m_v1_fallback.MarkBytesSent(bytes_sent); 1537 1538 if (m_send_state == SendState::AWAITING_KEY && m_send_pos == 0 && bytes_sent > 0) { 1539 LogDebug(BCLog::NET, "start sending v2 handshake to peer=%d\n", m_nodeid); 1540 } 1541 1542 m_send_pos += bytes_sent; 1543 Assume(m_send_pos <= m_send_buffer.size()); 1544 if (m_send_pos >= CMessageHeader::HEADER_SIZE) { 1545 m_sent_v1_header_worth = true; 1546 } 1547 // Wipe the buffer when everything is sent. 1548 if (m_send_pos == m_send_buffer.size()) { 1549 m_send_pos = 0; 1550 ClearShrink(m_send_buffer); 1551 } 1552 } 1553 1554 bool V2Transport::ShouldReconnectV1() const noexcept 1555 { 1556 AssertLockNotHeld(m_send_mutex); 1557 AssertLockNotHeld(m_recv_mutex); 1558 // Only outgoing connections need reconnection. 1559 if (!m_initiating) return false; 1560 1561 LOCK(m_recv_mutex); 1562 // We only reconnect in the very first state and when the receive buffer is empty. Together 1563 // these conditions imply nothing has been received so far. 1564 if (m_recv_state != RecvState::KEY) return false; 1565 if (!m_recv_buffer.empty()) return false; 1566 // Check if we've sent enough for the other side to disconnect us (if it was V1). 1567 LOCK(m_send_mutex); 1568 return m_sent_v1_header_worth; 1569 } 1570 1571 size_t V2Transport::GetSendMemoryUsage() const noexcept 1572 { 1573 AssertLockNotHeld(m_send_mutex); 1574 LOCK(m_send_mutex); 1575 if (m_send_state == SendState::V1) return m_v1_fallback.GetSendMemoryUsage(); 1576 1577 return sizeof(m_send_buffer) + memusage::DynamicUsage(m_send_buffer); 1578 } 1579 1580 Transport::Info V2Transport::GetInfo() const noexcept 1581 { 1582 AssertLockNotHeld(m_recv_mutex); 1583 LOCK(m_recv_mutex); 1584 if (m_recv_state == RecvState::V1) return m_v1_fallback.GetInfo(); 1585 1586 Transport::Info info; 1587 1588 // Do not report v2 and session ID until the version packet has been received 1589 // and verified (confirming that the other side very likely has the same keys as us). 1590 if (m_recv_state != RecvState::KEY_MAYBE_V1 && m_recv_state != RecvState::KEY && 1591 m_recv_state != RecvState::GARB_GARBTERM && m_recv_state != RecvState::VERSION) { 1592 info.transport_type = TransportProtocolType::V2; 1593 info.session_id = uint256(MakeUCharSpan(m_cipher.GetSessionID())); 1594 } else { 1595 info.transport_type = TransportProtocolType::DETECTING; 1596 } 1597 1598 return info; 1599 } 1600 1601 std::pair<size_t, bool> CConnman::SocketSendData(CNode& node) const 1602 { 1603 auto it = node.vSendMsg.begin(); 1604 size_t nSentSize = 0; 1605 bool data_left{false}; //!< second return value (whether unsent data remains) 1606 std::optional<bool> expected_more; 1607 1608 while (true) { 1609 if (it != node.vSendMsg.end()) { 1610 // If possible, move one message from the send queue to the transport. This fails when 1611 // there is an existing message still being sent, or (for v2 transports) when the 1612 // handshake has not yet completed. 1613 size_t memusage = it->GetMemoryUsage(); 1614 if (node.m_transport->SetMessageToSend(*it)) { 1615 // Update memory usage of send buffer (as *it will be deleted). 1616 node.m_send_memusage -= memusage; 1617 ++it; 1618 } 1619 } 1620 const auto& [data, more, msg_type] = node.m_transport->GetBytesToSend(it != node.vSendMsg.end()); 1621 // We rely on the 'more' value returned by GetBytesToSend to correctly predict whether more 1622 // bytes are still to be sent, to correctly set the MSG_MORE flag. As a sanity check, 1623 // verify that the previously returned 'more' was correct. 1624 if (expected_more.has_value()) Assume(!data.empty() == *expected_more); 1625 expected_more = more; 1626 data_left = !data.empty(); // will be overwritten on next loop if all of data gets sent 1627 int nBytes = 0; 1628 if (!data.empty()) { 1629 LOCK(node.m_sock_mutex); 1630 // There is no socket in case we've already disconnected, or in test cases without 1631 // real connections. In these cases, we bail out immediately and just leave things 1632 // in the send queue and transport. 1633 if (!node.m_sock) { 1634 break; 1635 } 1636 int flags = MSG_NOSIGNAL | MSG_DONTWAIT; 1637 #ifdef MSG_MORE 1638 if (more) { 1639 flags |= MSG_MORE; 1640 } 1641 #endif 1642 nBytes = node.m_sock->Send(reinterpret_cast<const char*>(data.data()), data.size(), flags); 1643 } 1644 if (nBytes > 0) { 1645 node.m_last_send = GetTime<std::chrono::seconds>(); 1646 node.nSendBytes += nBytes; 1647 // Notify transport that bytes have been processed. 1648 node.m_transport->MarkBytesSent(nBytes); 1649 // Update statistics per message type. 1650 if (!msg_type.empty()) { // don't report v2 handshake bytes for now 1651 node.AccountForSentBytes(msg_type, nBytes); 1652 } 1653 nSentSize += nBytes; 1654 if ((size_t)nBytes != data.size()) { 1655 // could not send full message; stop sending more 1656 break; 1657 } 1658 } else { 1659 if (nBytes < 0) { 1660 // error 1661 int nErr = WSAGetLastError(); 1662 if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { 1663 LogDebug(BCLog::NET, "socket send error, %s: %s\n", node.DisconnectMsg(fLogIPs), NetworkErrorString(nErr)); 1664 node.CloseSocketDisconnect(); 1665 } 1666 } 1667 break; 1668 } 1669 } 1670 1671 node.fPauseSend = node.m_send_memusage + node.m_transport->GetSendMemoryUsage() > nSendBufferMaxSize; 1672 1673 if (it == node.vSendMsg.end()) { 1674 assert(node.m_send_memusage == 0); 1675 } 1676 node.vSendMsg.erase(node.vSendMsg.begin(), it); 1677 return {nSentSize, data_left}; 1678 } 1679 1680 /** Try to find a connection to evict when the node is full. 1681 * Extreme care must be taken to avoid opening the node to attacker 1682 * triggered network partitioning. 1683 * The strategy used here is to protect a small number of peers 1684 * for each of several distinct characteristics which are difficult 1685 * to forge. In order to partition a node the attacker must be 1686 * simultaneously better at all of them than honest peers. 1687 */ 1688 bool CConnman::AttemptToEvictConnection() 1689 { 1690 std::vector<NodeEvictionCandidate> vEvictionCandidates; 1691 { 1692 1693 LOCK(m_nodes_mutex); 1694 for (const CNode* node : m_nodes) { 1695 if (node->fDisconnect) 1696 continue; 1697 NodeEvictionCandidate candidate{ 1698 .id = node->GetId(), 1699 .m_connected = node->m_connected, 1700 .m_min_ping_time = node->m_min_ping_time, 1701 .m_last_block_time = node->m_last_block_time, 1702 .m_last_tx_time = node->m_last_tx_time, 1703 .fRelevantServices = node->m_has_all_wanted_services, 1704 .m_relay_txs = node->m_relays_txs.load(), 1705 .fBloomFilter = node->m_bloom_filter_loaded.load(), 1706 .nKeyedNetGroup = node->nKeyedNetGroup, 1707 .prefer_evict = node->m_prefer_evict, 1708 .m_is_local = node->addr.IsLocal(), 1709 .m_network = node->ConnectedThroughNetwork(), 1710 .m_noban = node->HasPermission(NetPermissionFlags::NoBan), 1711 .m_conn_type = node->m_conn_type, 1712 }; 1713 vEvictionCandidates.push_back(candidate); 1714 } 1715 } 1716 const std::optional<NodeId> node_id_to_evict = SelectNodeToEvict(std::move(vEvictionCandidates)); 1717 if (!node_id_to_evict) { 1718 return false; 1719 } 1720 LOCK(m_nodes_mutex); 1721 for (CNode* pnode : m_nodes) { 1722 if (pnode->GetId() == *node_id_to_evict) { 1723 LogDebug(BCLog::NET, "selected %s connection for eviction, %s", pnode->ConnectionTypeAsString(), pnode->DisconnectMsg(fLogIPs)); 1724 TRACEPOINT(net, evicted_inbound_connection, 1725 pnode->GetId(), 1726 pnode->m_addr_name.c_str(), 1727 pnode->ConnectionTypeAsString().c_str(), 1728 pnode->ConnectedThroughNetwork(), 1729 Ticks<std::chrono::seconds>(pnode->m_connected)); 1730 pnode->fDisconnect = true; 1731 return true; 1732 } 1733 } 1734 return false; 1735 } 1736 1737 void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { 1738 struct sockaddr_storage sockaddr; 1739 socklen_t len = sizeof(sockaddr); 1740 auto sock = hListenSocket.sock->Accept((struct sockaddr*)&sockaddr, &len); 1741 1742 if (!sock) { 1743 const int nErr = WSAGetLastError(); 1744 if (nErr != WSAEWOULDBLOCK) { 1745 LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr)); 1746 } 1747 return; 1748 } 1749 1750 CService addr; 1751 if (!addr.SetSockAddr((const struct sockaddr*)&sockaddr, len)) { 1752 LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "Unknown socket family\n"); 1753 } else { 1754 addr = MaybeFlipIPv6toCJDNS(addr); 1755 } 1756 1757 const CService addr_bind{MaybeFlipIPv6toCJDNS(GetBindAddress(*sock))}; 1758 1759 NetPermissionFlags permission_flags = NetPermissionFlags::None; 1760 hListenSocket.AddSocketPermissionFlags(permission_flags); 1761 1762 CreateNodeFromAcceptedSocket(std::move(sock), permission_flags, addr_bind, addr); 1763 } 1764 1765 void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock, 1766 NetPermissionFlags permission_flags, 1767 const CService& addr_bind, 1768 const CService& addr) 1769 { 1770 int nInbound = 0; 1771 1772 AddWhitelistPermissionFlags(permission_flags, addr, vWhitelistedRangeIncoming); 1773 1774 { 1775 LOCK(m_nodes_mutex); 1776 for (const CNode* pnode : m_nodes) { 1777 if (pnode->IsInboundConn()) nInbound++; 1778 } 1779 } 1780 1781 if (!fNetworkActive) { 1782 LogDebug(BCLog::NET, "connection from %s dropped: not accepting new connections\n", addr.ToStringAddrPort()); 1783 return; 1784 } 1785 1786 if (!sock->IsSelectable()) { 1787 LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToStringAddrPort()); 1788 return; 1789 } 1790 1791 // According to the internet TCP_NODELAY is not carried into accepted sockets 1792 // on all platforms. Set it again here just to be sure. 1793 const int on{1}; 1794 if (sock->SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == SOCKET_ERROR) { 1795 LogDebug(BCLog::NET, "connection from %s: unable to set TCP_NODELAY, continuing anyway\n", 1796 addr.ToStringAddrPort()); 1797 } 1798 1799 // Don't accept connections from banned peers. 1800 bool banned = m_banman && m_banman->IsBanned(addr); 1801 if (!NetPermissions::HasFlag(permission_flags, NetPermissionFlags::NoBan) && banned) 1802 { 1803 LogDebug(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToStringAddrPort()); 1804 return; 1805 } 1806 1807 // Only accept connections from discouraged peers if our inbound slots aren't (almost) full. 1808 bool discouraged = m_banman && m_banman->IsDiscouraged(addr); 1809 if (!NetPermissions::HasFlag(permission_flags, NetPermissionFlags::NoBan) && nInbound + 1 >= m_max_inbound && discouraged) 1810 { 1811 LogDebug(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToStringAddrPort()); 1812 return; 1813 } 1814 1815 if (nInbound >= m_max_inbound) 1816 { 1817 if (!AttemptToEvictConnection()) { 1818 // No connection to evict, disconnect the new connection 1819 LogDebug(BCLog::NET, "failed to find an eviction candidate - connection dropped (full)\n"); 1820 return; 1821 } 1822 } 1823 1824 NodeId id = GetNewNodeId(); 1825 uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); 1826 1827 const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); 1828 // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is 1829 // detected, so use it whenever we signal NODE_P2P_V2. 1830 ServiceFlags local_services = GetLocalServices(); 1831 const bool use_v2transport(local_services & NODE_P2P_V2); 1832 1833 CNode* pnode = new CNode(id, 1834 std::move(sock), 1835 CAddress{addr, NODE_NONE}, 1836 CalculateKeyedNetGroup(addr), 1837 nonce, 1838 addr_bind, 1839 /*addrNameIn=*/"", 1840 ConnectionType::INBOUND, 1841 inbound_onion, 1842 CNodeOptions{ 1843 .permission_flags = permission_flags, 1844 .prefer_evict = discouraged, 1845 .recv_flood_size = nReceiveFloodSize, 1846 .use_v2transport = use_v2transport, 1847 }); 1848 pnode->AddRef(); 1849 m_msgproc->InitializeNode(*pnode, local_services); 1850 { 1851 LOCK(m_nodes_mutex); 1852 m_nodes.push_back(pnode); 1853 } 1854 LogDebug(BCLog::NET, "connection from %s accepted\n", addr.ToStringAddrPort()); 1855 TRACEPOINT(net, inbound_connection, 1856 pnode->GetId(), 1857 pnode->m_addr_name.c_str(), 1858 pnode->ConnectionTypeAsString().c_str(), 1859 pnode->ConnectedThroughNetwork(), 1860 GetNodeCount(ConnectionDirection::In)); 1861 1862 // We received a new connection, harvest entropy from the time (and our peer count) 1863 RandAddEvent((uint32_t)id); 1864 } 1865 1866 bool CConnman::AddConnection(const std::string& address, ConnectionType conn_type, bool use_v2transport = false) 1867 { 1868 AssertLockNotHeld(m_unused_i2p_sessions_mutex); 1869 std::optional<int> max_connections; 1870 switch (conn_type) { 1871 case ConnectionType::INBOUND: 1872 case ConnectionType::MANUAL: 1873 return false; 1874 case ConnectionType::OUTBOUND_FULL_RELAY: 1875 max_connections = m_max_outbound_full_relay; 1876 break; 1877 case ConnectionType::BLOCK_RELAY: 1878 max_connections = m_max_outbound_block_relay; 1879 break; 1880 // no limit for ADDR_FETCH because -seednode has no limit either 1881 case ConnectionType::ADDR_FETCH: 1882 break; 1883 // no limit for FEELER connections since they're short-lived 1884 case ConnectionType::FEELER: 1885 break; 1886 } // no default case, so the compiler can warn about missing cases 1887 1888 // Count existing connections 1889 int existing_connections = WITH_LOCK(m_nodes_mutex, 1890 return std::count_if(m_nodes.begin(), m_nodes.end(), [conn_type](CNode* node) { return node->m_conn_type == conn_type; });); 1891 1892 // Max connections of specified type already exist 1893 if (max_connections != std::nullopt && existing_connections >= max_connections) return false; 1894 1895 // Max total outbound connections already exist 1896 CSemaphoreGrant grant(*semOutbound, true); 1897 if (!grant) return false; 1898 1899 OpenNetworkConnection(CAddress(), false, std::move(grant), address.c_str(), conn_type, /*use_v2transport=*/use_v2transport); 1900 return true; 1901 } 1902 1903 void CConnman::DisconnectNodes() 1904 { 1905 AssertLockNotHeld(m_nodes_mutex); 1906 AssertLockNotHeld(m_reconnections_mutex); 1907 1908 // Use a temporary variable to accumulate desired reconnections, so we don't need 1909 // m_reconnections_mutex while holding m_nodes_mutex. 1910 decltype(m_reconnections) reconnections_to_add; 1911 1912 { 1913 LOCK(m_nodes_mutex); 1914 1915 const bool network_active{fNetworkActive}; 1916 if (!network_active) { 1917 // Disconnect any connected nodes 1918 for (CNode* pnode : m_nodes) { 1919 if (!pnode->fDisconnect) { 1920 LogDebug(BCLog::NET, "Network not active, %s\n", pnode->DisconnectMsg(fLogIPs)); 1921 pnode->fDisconnect = true; 1922 } 1923 } 1924 } 1925 1926 // Disconnect unused nodes 1927 std::vector<CNode*> nodes_copy = m_nodes; 1928 for (CNode* pnode : nodes_copy) 1929 { 1930 if (pnode->fDisconnect) 1931 { 1932 // remove from m_nodes 1933 m_nodes.erase(remove(m_nodes.begin(), m_nodes.end(), pnode), m_nodes.end()); 1934 1935 // Add to reconnection list if appropriate. We don't reconnect right here, because 1936 // the creation of a connection is a blocking operation (up to several seconds), 1937 // and we don't want to hold up the socket handler thread for that long. 1938 if (network_active && pnode->m_transport->ShouldReconnectV1()) { 1939 reconnections_to_add.push_back({ 1940 .addr_connect = pnode->addr, 1941 .grant = std::move(pnode->grantOutbound), 1942 .destination = pnode->m_dest, 1943 .conn_type = pnode->m_conn_type, 1944 .use_v2transport = false}); 1945 LogDebug(BCLog::NET, "retrying with v1 transport protocol for peer=%d\n", pnode->GetId()); 1946 } 1947 1948 // release outbound grant (if any) 1949 pnode->grantOutbound.Release(); 1950 1951 // close socket and cleanup 1952 pnode->CloseSocketDisconnect(); 1953 1954 // update connection count by network 1955 if (pnode->IsManualOrFullOutboundConn()) --m_network_conn_counts[pnode->addr.GetNetwork()]; 1956 1957 // hold in disconnected pool until all refs are released 1958 pnode->Release(); 1959 m_nodes_disconnected.push_back(pnode); 1960 } 1961 } 1962 } 1963 { 1964 // Delete disconnected nodes 1965 std::list<CNode*> nodes_disconnected_copy = m_nodes_disconnected; 1966 for (CNode* pnode : nodes_disconnected_copy) 1967 { 1968 // Destroy the object only after other threads have stopped using it. 1969 if (pnode->GetRefCount() <= 0) { 1970 m_nodes_disconnected.remove(pnode); 1971 DeleteNode(pnode); 1972 } 1973 } 1974 } 1975 { 1976 // Move entries from reconnections_to_add to m_reconnections. 1977 LOCK(m_reconnections_mutex); 1978 m_reconnections.splice(m_reconnections.end(), std::move(reconnections_to_add)); 1979 } 1980 } 1981 1982 void CConnman::NotifyNumConnectionsChanged() 1983 { 1984 size_t nodes_size; 1985 { 1986 LOCK(m_nodes_mutex); 1987 nodes_size = m_nodes.size(); 1988 } 1989 if(nodes_size != nPrevNodeCount) { 1990 nPrevNodeCount = nodes_size; 1991 if (m_client_interface) { 1992 m_client_interface->NotifyNumConnectionsChanged(nodes_size); 1993 } 1994 } 1995 } 1996 1997 bool CConnman::ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const 1998 { 1999 return node.m_connected + m_peer_connect_timeout < now; 2000 } 2001 2002 bool CConnman::InactivityCheck(const CNode& node) const 2003 { 2004 // Tests that see disconnects after using mocktime can start nodes with a 2005 // large timeout. For example, -peertimeout=999999999. 2006 const auto now{GetTime<std::chrono::seconds>()}; 2007 const auto last_send{node.m_last_send.load()}; 2008 const auto last_recv{node.m_last_recv.load()}; 2009 2010 if (!ShouldRunInactivityChecks(node, now)) return false; 2011 2012 bool has_received{last_recv.count() != 0}; 2013 bool has_sent{last_send.count() != 0}; 2014 2015 if (!has_received || !has_sent) { 2016 std::string has_never; 2017 if (!has_received) has_never += ", never received from peer"; 2018 if (!has_sent) has_never += ", never sent to peer"; 2019 LogDebug(BCLog::NET, 2020 "socket no message in first %i seconds%s, %s\n", 2021 count_seconds(m_peer_connect_timeout), 2022 has_never, 2023 node.DisconnectMsg(fLogIPs) 2024 ); 2025 return true; 2026 } 2027 2028 if (now > last_send + TIMEOUT_INTERVAL) { 2029 LogDebug(BCLog::NET, 2030 "socket sending timeout: %is, %s\n", count_seconds(now - last_send), 2031 node.DisconnectMsg(fLogIPs) 2032 ); 2033 return true; 2034 } 2035 2036 if (now > last_recv + TIMEOUT_INTERVAL) { 2037 LogDebug(BCLog::NET, 2038 "socket receive timeout: %is, %s\n", count_seconds(now - last_recv), 2039 node.DisconnectMsg(fLogIPs) 2040 ); 2041 return true; 2042 } 2043 2044 if (!node.fSuccessfullyConnected) { 2045 if (node.m_transport->GetInfo().transport_type == TransportProtocolType::DETECTING) { 2046 LogDebug(BCLog::NET, "V2 handshake timeout, %s\n", node.DisconnectMsg(fLogIPs)); 2047 } else { 2048 LogDebug(BCLog::NET, "version handshake timeout, %s\n", node.DisconnectMsg(fLogIPs)); 2049 } 2050 return true; 2051 } 2052 2053 return false; 2054 } 2055 2056 Sock::EventsPerSock CConnman::GenerateWaitSockets(std::span<CNode* const> nodes) 2057 { 2058 Sock::EventsPerSock events_per_sock; 2059 2060 for (const ListenSocket& hListenSocket : vhListenSocket) { 2061 events_per_sock.emplace(hListenSocket.sock, Sock::Events{Sock::RECV}); 2062 } 2063 2064 for (CNode* pnode : nodes) { 2065 bool select_recv = !pnode->fPauseRecv; 2066 bool select_send; 2067 { 2068 LOCK(pnode->cs_vSend); 2069 // Sending is possible if either there are bytes to send right now, or if there will be 2070 // once a potential message from vSendMsg is handed to the transport. GetBytesToSend 2071 // determines both of these in a single call. 2072 const auto& [to_send, more, _msg_type] = pnode->m_transport->GetBytesToSend(!pnode->vSendMsg.empty()); 2073 select_send = !to_send.empty() || more; 2074 } 2075 if (!select_recv && !select_send) continue; 2076 2077 LOCK(pnode->m_sock_mutex); 2078 if (pnode->m_sock) { 2079 Sock::Event event = (select_send ? Sock::SEND : 0) | (select_recv ? Sock::RECV : 0); 2080 events_per_sock.emplace(pnode->m_sock, Sock::Events{event}); 2081 } 2082 } 2083 2084 return events_per_sock; 2085 } 2086 2087 void CConnman::SocketHandler() 2088 { 2089 AssertLockNotHeld(m_total_bytes_sent_mutex); 2090 2091 Sock::EventsPerSock events_per_sock; 2092 2093 { 2094 const NodesSnapshot snap{*this, /*shuffle=*/false}; 2095 2096 const auto timeout = std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS); 2097 2098 // Check for the readiness of the already connected sockets and the 2099 // listening sockets in one call ("readiness" as in poll(2) or 2100 // select(2)). If none are ready, wait for a short while and return 2101 // empty sets. 2102 events_per_sock = GenerateWaitSockets(snap.Nodes()); 2103 if (events_per_sock.empty() || !events_per_sock.begin()->first->WaitMany(timeout, events_per_sock)) { 2104 interruptNet.sleep_for(timeout); 2105 } 2106 2107 // Service (send/receive) each of the already connected nodes. 2108 SocketHandlerConnected(snap.Nodes(), events_per_sock); 2109 } 2110 2111 // Accept new connections from listening sockets. 2112 SocketHandlerListening(events_per_sock); 2113 } 2114 2115 void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes, 2116 const Sock::EventsPerSock& events_per_sock) 2117 { 2118 AssertLockNotHeld(m_total_bytes_sent_mutex); 2119 2120 for (CNode* pnode : nodes) { 2121 if (interruptNet) 2122 return; 2123 2124 // 2125 // Receive 2126 // 2127 bool recvSet = false; 2128 bool sendSet = false; 2129 bool errorSet = false; 2130 { 2131 LOCK(pnode->m_sock_mutex); 2132 if (!pnode->m_sock) { 2133 continue; 2134 } 2135 const auto it = events_per_sock.find(pnode->m_sock); 2136 if (it != events_per_sock.end()) { 2137 recvSet = it->second.occurred & Sock::RECV; 2138 sendSet = it->second.occurred & Sock::SEND; 2139 errorSet = it->second.occurred & Sock::ERR; 2140 } 2141 } 2142 2143 if (sendSet) { 2144 // Send data 2145 auto [bytes_sent, data_left] = WITH_LOCK(pnode->cs_vSend, return SocketSendData(*pnode)); 2146 if (bytes_sent) { 2147 RecordBytesSent(bytes_sent); 2148 2149 // If both receiving and (non-optimistic) sending were possible, we first attempt 2150 // sending. If that succeeds, but does not fully drain the send queue, do not 2151 // attempt to receive. This avoids needlessly queueing data if the remote peer 2152 // is slow at receiving data, by means of TCP flow control. We only do this when 2153 // sending actually succeeded to make sure progress is always made; otherwise a 2154 // deadlock would be possible when both sides have data to send, but neither is 2155 // receiving. 2156 if (data_left) recvSet = false; 2157 } 2158 } 2159 2160 if (recvSet || errorSet) 2161 { 2162 // typical socket buffer is 8K-64K 2163 uint8_t pchBuf[0x10000]; 2164 int nBytes = 0; 2165 { 2166 LOCK(pnode->m_sock_mutex); 2167 if (!pnode->m_sock) { 2168 continue; 2169 } 2170 nBytes = pnode->m_sock->Recv(pchBuf, sizeof(pchBuf), MSG_DONTWAIT); 2171 } 2172 if (nBytes > 0) 2173 { 2174 bool notify = false; 2175 if (!pnode->ReceiveMsgBytes({pchBuf, (size_t)nBytes}, notify)) { 2176 LogDebug(BCLog::NET, 2177 "receiving message bytes failed, %s\n", 2178 pnode->DisconnectMsg(fLogIPs) 2179 ); 2180 pnode->CloseSocketDisconnect(); 2181 } 2182 RecordBytesRecv(nBytes); 2183 if (notify) { 2184 pnode->MarkReceivedMsgsForProcessing(); 2185 WakeMessageHandler(); 2186 } 2187 } 2188 else if (nBytes == 0) 2189 { 2190 // socket closed gracefully 2191 if (!pnode->fDisconnect) { 2192 LogDebug(BCLog::NET, "socket closed, %s\n", pnode->DisconnectMsg(fLogIPs)); 2193 } 2194 pnode->CloseSocketDisconnect(); 2195 } 2196 else if (nBytes < 0) 2197 { 2198 // error 2199 int nErr = WSAGetLastError(); 2200 if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) 2201 { 2202 if (!pnode->fDisconnect) { 2203 LogDebug(BCLog::NET, "socket recv error, %s: %s\n", pnode->DisconnectMsg(fLogIPs), NetworkErrorString(nErr)); 2204 } 2205 pnode->CloseSocketDisconnect(); 2206 } 2207 } 2208 } 2209 2210 if (InactivityCheck(*pnode)) pnode->fDisconnect = true; 2211 } 2212 } 2213 2214 void CConnman::SocketHandlerListening(const Sock::EventsPerSock& events_per_sock) 2215 { 2216 for (const ListenSocket& listen_socket : vhListenSocket) { 2217 if (interruptNet) { 2218 return; 2219 } 2220 const auto it = events_per_sock.find(listen_socket.sock); 2221 if (it != events_per_sock.end() && it->second.occurred & Sock::RECV) { 2222 AcceptConnection(listen_socket); 2223 } 2224 } 2225 } 2226 2227 void CConnman::ThreadSocketHandler() 2228 { 2229 AssertLockNotHeld(m_total_bytes_sent_mutex); 2230 2231 while (!interruptNet) 2232 { 2233 DisconnectNodes(); 2234 NotifyNumConnectionsChanged(); 2235 SocketHandler(); 2236 } 2237 } 2238 2239 void CConnman::WakeMessageHandler() 2240 { 2241 { 2242 LOCK(mutexMsgProc); 2243 fMsgProcWake = true; 2244 } 2245 condMsgProc.notify_one(); 2246 } 2247 2248 void CConnman::ThreadDNSAddressSeed() 2249 { 2250 int outbound_connection_count = 0; 2251 2252 if (!gArgs.GetArgs("-seednode").empty()) { 2253 auto start = NodeClock::now(); 2254 constexpr std::chrono::seconds SEEDNODE_TIMEOUT = 30s; 2255 LogPrintf("-seednode enabled. Trying the provided seeds for %d seconds before defaulting to the dnsseeds.\n", SEEDNODE_TIMEOUT.count()); 2256 while (!interruptNet) { 2257 if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) 2258 return; 2259 2260 // Abort if we have spent enough time without reaching our target. 2261 // Giving seed nodes 30 seconds so this does not become a race against fixedseeds (which triggers after 1 min) 2262 if (NodeClock::now() > start + SEEDNODE_TIMEOUT) { 2263 LogPrintf("Couldn't connect to enough peers via seed nodes. Handing fetch logic to the DNS seeds.\n"); 2264 break; 2265 } 2266 2267 outbound_connection_count = GetFullOutboundConnCount(); 2268 if (outbound_connection_count >= SEED_OUTBOUND_CONNECTION_THRESHOLD) { 2269 LogPrintf("P2P peers available. Finished fetching data from seed nodes.\n"); 2270 break; 2271 } 2272 } 2273 } 2274 2275 FastRandomContext rng; 2276 std::vector<std::string> seeds = m_params.DNSSeeds(); 2277 std::shuffle(seeds.begin(), seeds.end(), rng); 2278 int seeds_right_now = 0; // Number of seeds left before testing if we have enough connections 2279 2280 if (gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED)) { 2281 // When -forcednsseed is provided, query all. 2282 seeds_right_now = seeds.size(); 2283 } else if (addrman.Size() == 0) { 2284 // If we have no known peers, query all. 2285 // This will occur on the first run, or if peers.dat has been 2286 // deleted. 2287 seeds_right_now = seeds.size(); 2288 } 2289 2290 // Proceed with dnsseeds if seednodes hasn't reached the target or if forcednsseed is set 2291 if (outbound_connection_count < SEED_OUTBOUND_CONNECTION_THRESHOLD || seeds_right_now) { 2292 // goal: only query DNS seed if address need is acute 2293 // * If we have a reasonable number of peers in addrman, spend 2294 // some time trying them first. This improves user privacy by 2295 // creating fewer identifying DNS requests, reduces trust by 2296 // giving seeds less influence on the network topology, and 2297 // reduces traffic to the seeds. 2298 // * When querying DNS seeds query a few at once, this ensures 2299 // that we don't give DNS seeds the ability to eclipse nodes 2300 // that query them. 2301 // * If we continue having problems, eventually query all the 2302 // DNS seeds, and if that fails too, also try the fixed seeds. 2303 // (done in ThreadOpenConnections) 2304 int found = 0; 2305 const std::chrono::seconds seeds_wait_time = (addrman.Size() >= DNSSEEDS_DELAY_PEER_THRESHOLD ? DNSSEEDS_DELAY_MANY_PEERS : DNSSEEDS_DELAY_FEW_PEERS); 2306 2307 for (const std::string& seed : seeds) { 2308 if (seeds_right_now == 0) { 2309 seeds_right_now += DNSSEEDS_TO_QUERY_AT_ONCE; 2310 2311 if (addrman.Size() > 0) { 2312 LogPrintf("Waiting %d seconds before querying DNS seeds.\n", seeds_wait_time.count()); 2313 std::chrono::seconds to_wait = seeds_wait_time; 2314 while (to_wait.count() > 0) { 2315 // if sleeping for the MANY_PEERS interval, wake up 2316 // early to see if we have enough peers and can stop 2317 // this thread entirely freeing up its resources 2318 std::chrono::seconds w = std::min(DNSSEEDS_DELAY_FEW_PEERS, to_wait); 2319 if (!interruptNet.sleep_for(w)) return; 2320 to_wait -= w; 2321 2322 if (GetFullOutboundConnCount() >= SEED_OUTBOUND_CONNECTION_THRESHOLD) { 2323 if (found > 0) { 2324 LogPrintf("%d addresses found from DNS seeds\n", found); 2325 LogPrintf("P2P peers available. Finished DNS seeding.\n"); 2326 } else { 2327 LogPrintf("P2P peers available. Skipped DNS seeding.\n"); 2328 } 2329 return; 2330 } 2331 } 2332 } 2333 } 2334 2335 if (interruptNet) return; 2336 2337 // hold off on querying seeds if P2P network deactivated 2338 if (!fNetworkActive) { 2339 LogPrintf("Waiting for network to be reactivated before querying DNS seeds.\n"); 2340 do { 2341 if (!interruptNet.sleep_for(std::chrono::seconds{1})) return; 2342 } while (!fNetworkActive); 2343 } 2344 2345 LogPrintf("Loading addresses from DNS seed %s\n", seed); 2346 // If -proxy is in use, we make an ADDR_FETCH connection to the DNS resolved peer address 2347 // for the base dns seed domain in chainparams 2348 if (HaveNameProxy()) { 2349 AddAddrFetch(seed); 2350 } else { 2351 std::vector<CAddress> vAdd; 2352 constexpr ServiceFlags requiredServiceBits{SeedsServiceFlags()}; 2353 std::string host = strprintf("x%x.%s", requiredServiceBits, seed); 2354 CNetAddr resolveSource; 2355 if (!resolveSource.SetInternal(host)) { 2356 continue; 2357 } 2358 // Limit number of IPs learned from a single DNS seed. This limit exists to prevent the results from 2359 // one DNS seed from dominating AddrMan. Note that the number of results from a UDP DNS query is 2360 // bounded to 33 already, but it is possible for it to use TCP where a larger number of results can be 2361 // returned. 2362 unsigned int nMaxIPs = 32; 2363 const auto addresses{LookupHost(host, nMaxIPs, true)}; 2364 if (!addresses.empty()) { 2365 for (const CNetAddr& ip : addresses) { 2366 CAddress addr = CAddress(CService(ip, m_params.GetDefaultPort()), requiredServiceBits); 2367 addr.nTime = rng.rand_uniform_delay(Now<NodeSeconds>() - 3 * 24h, -4 * 24h); // use a random age between 3 and 7 days old 2368 vAdd.push_back(addr); 2369 found++; 2370 } 2371 addrman.Add(vAdd, resolveSource); 2372 } else { 2373 // If the seed does not support a subdomain with our desired service bits, 2374 // we make an ADDR_FETCH connection to the DNS resolved peer address for the 2375 // base dns seed domain in chainparams 2376 AddAddrFetch(seed); 2377 } 2378 } 2379 --seeds_right_now; 2380 } 2381 LogPrintf("%d addresses found from DNS seeds\n", found); 2382 } else { 2383 LogPrintf("Skipping DNS seeds. Enough peers have been found\n"); 2384 } 2385 } 2386 2387 void CConnman::DumpAddresses() 2388 { 2389 const auto start{SteadyClock::now()}; 2390 2391 DumpPeerAddresses(::gArgs, addrman); 2392 2393 LogDebug(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n", 2394 addrman.Size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start)); 2395 } 2396 2397 void CConnman::ProcessAddrFetch() 2398 { 2399 AssertLockNotHeld(m_unused_i2p_sessions_mutex); 2400 std::string strDest; 2401 { 2402 LOCK(m_addr_fetches_mutex); 2403 if (m_addr_fetches.empty()) 2404 return; 2405 strDest = m_addr_fetches.front(); 2406 m_addr_fetches.pop_front(); 2407 } 2408 // Attempt v2 connection if we support v2 - we'll reconnect with v1 if our 2409 // peer doesn't support it or immediately disconnects us for another reason. 2410 const bool use_v2transport(GetLocalServices() & NODE_P2P_V2); 2411 CAddress addr; 2412 CSemaphoreGrant grant(*semOutbound, /*fTry=*/true); 2413 if (grant) { 2414 OpenNetworkConnection(addr, false, std::move(grant), strDest.c_str(), ConnectionType::ADDR_FETCH, use_v2transport); 2415 } 2416 } 2417 2418 bool CConnman::GetTryNewOutboundPeer() const 2419 { 2420 return m_try_another_outbound_peer; 2421 } 2422 2423 void CConnman::SetTryNewOutboundPeer(bool flag) 2424 { 2425 m_try_another_outbound_peer = flag; 2426 LogDebug(BCLog::NET, "setting try another outbound peer=%s\n", flag ? "true" : "false"); 2427 } 2428 2429 void CConnman::StartExtraBlockRelayPeers() 2430 { 2431 LogDebug(BCLog::NET, "enabling extra block-relay-only peers\n"); 2432 m_start_extra_block_relay_peers = true; 2433 } 2434 2435 // Return the number of outbound connections that are full relay (not blocks only) 2436 int CConnman::GetFullOutboundConnCount() const 2437 { 2438 int nRelevant = 0; 2439 { 2440 LOCK(m_nodes_mutex); 2441 for (const CNode* pnode : m_nodes) { 2442 if (pnode->fSuccessfullyConnected && pnode->IsFullOutboundConn()) ++nRelevant; 2443 } 2444 } 2445 return nRelevant; 2446 } 2447 2448 // Return the number of peers we have over our outbound connection limit 2449 // Exclude peers that are marked for disconnect, or are going to be 2450 // disconnected soon (eg ADDR_FETCH and FEELER) 2451 // Also exclude peers that haven't finished initial connection handshake yet 2452 // (so that we don't decide we're over our desired connection limit, and then 2453 // evict some peer that has finished the handshake) 2454 int CConnman::GetExtraFullOutboundCount() const 2455 { 2456 int full_outbound_peers = 0; 2457 { 2458 LOCK(m_nodes_mutex); 2459 for (const CNode* pnode : m_nodes) { 2460 if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsFullOutboundConn()) { 2461 ++full_outbound_peers; 2462 } 2463 } 2464 } 2465 return std::max(full_outbound_peers - m_max_outbound_full_relay, 0); 2466 } 2467 2468 int CConnman::GetExtraBlockRelayCount() const 2469 { 2470 int block_relay_peers = 0; 2471 { 2472 LOCK(m_nodes_mutex); 2473 for (const CNode* pnode : m_nodes) { 2474 if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsBlockOnlyConn()) { 2475 ++block_relay_peers; 2476 } 2477 } 2478 } 2479 return std::max(block_relay_peers - m_max_outbound_block_relay, 0); 2480 } 2481 2482 std::unordered_set<Network> CConnman::GetReachableEmptyNetworks() const 2483 { 2484 std::unordered_set<Network> networks{}; 2485 for (int n = 0; n < NET_MAX; n++) { 2486 enum Network net = (enum Network)n; 2487 if (net == NET_UNROUTABLE || net == NET_INTERNAL) continue; 2488 if (g_reachable_nets.Contains(net) && addrman.Size(net, std::nullopt) == 0) { 2489 networks.insert(net); 2490 } 2491 } 2492 return networks; 2493 } 2494 2495 bool CConnman::MultipleManualOrFullOutboundConns(Network net) const 2496 { 2497 AssertLockHeld(m_nodes_mutex); 2498 return m_network_conn_counts[net] > 1; 2499 } 2500 2501 bool CConnman::MaybePickPreferredNetwork(std::optional<Network>& network) 2502 { 2503 std::array<Network, 5> nets{NET_IPV4, NET_IPV6, NET_ONION, NET_I2P, NET_CJDNS}; 2504 std::shuffle(nets.begin(), nets.end(), FastRandomContext()); 2505 2506 LOCK(m_nodes_mutex); 2507 for (const auto net : nets) { 2508 if (g_reachable_nets.Contains(net) && m_network_conn_counts[net] == 0 && addrman.Size(net) != 0) { 2509 network = net; 2510 return true; 2511 } 2512 } 2513 2514 return false; 2515 } 2516 2517 void CConnman::ThreadOpenConnections(const std::vector<std::string> connect, std::span<const std::string> seed_nodes) 2518 { 2519 AssertLockNotHeld(m_unused_i2p_sessions_mutex); 2520 AssertLockNotHeld(m_reconnections_mutex); 2521 FastRandomContext rng; 2522 // Connect to specific addresses 2523 if (!connect.empty()) 2524 { 2525 // Attempt v2 connection if we support v2 - we'll reconnect with v1 if our 2526 // peer doesn't support it or immediately disconnects us for another reason. 2527 const bool use_v2transport(GetLocalServices() & NODE_P2P_V2); 2528 for (int64_t nLoop = 0;; nLoop++) 2529 { 2530 for (const std::string& strAddr : connect) 2531 { 2532 CAddress addr(CService(), NODE_NONE); 2533 OpenNetworkConnection(addr, false, {}, strAddr.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/use_v2transport); 2534 for (int i = 0; i < 10 && i < nLoop; i++) 2535 { 2536 if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) 2537 return; 2538 } 2539 } 2540 if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) 2541 return; 2542 PerformReconnections(); 2543 } 2544 } 2545 2546 // Initiate network connections 2547 auto start = GetTime<std::chrono::microseconds>(); 2548 2549 // Minimum time before next feeler connection (in microseconds). 2550 auto next_feeler = start + rng.rand_exp_duration(FEELER_INTERVAL); 2551 auto next_extra_block_relay = start + rng.rand_exp_duration(EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); 2552 auto next_extra_network_peer{start + rng.rand_exp_duration(EXTRA_NETWORK_PEER_INTERVAL)}; 2553 const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED); 2554 bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS); 2555 const bool use_seednodes{!gArgs.GetArgs("-seednode").empty()}; 2556 2557 auto seed_node_timer = NodeClock::now(); 2558 bool add_addr_fetch{addrman.Size() == 0 && !seed_nodes.empty()}; 2559 constexpr std::chrono::seconds ADD_NEXT_SEEDNODE = 10s; 2560 2561 if (!add_fixed_seeds) { 2562 LogPrintf("Fixed seeds are disabled\n"); 2563 } 2564 2565 while (!interruptNet) 2566 { 2567 if (add_addr_fetch) { 2568 add_addr_fetch = false; 2569 const auto& seed{SpanPopBack(seed_nodes)}; 2570 AddAddrFetch(seed); 2571 2572 if (addrman.Size() == 0) { 2573 LogInfo("Empty addrman, adding seednode (%s) to addrfetch\n", seed); 2574 } else { 2575 LogInfo("Couldn't connect to peers from addrman after %d seconds. Adding seednode (%s) to addrfetch\n", ADD_NEXT_SEEDNODE.count(), seed); 2576 } 2577 } 2578 2579 ProcessAddrFetch(); 2580 2581 if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) 2582 return; 2583 2584 PerformReconnections(); 2585 2586 CSemaphoreGrant grant(*semOutbound); 2587 if (interruptNet) 2588 return; 2589 2590 const std::unordered_set<Network> fixed_seed_networks{GetReachableEmptyNetworks()}; 2591 if (add_fixed_seeds && !fixed_seed_networks.empty()) { 2592 // When the node starts with an empty peers.dat, there are a few other sources of peers before 2593 // we fallback on to fixed seeds: -dnsseed, -seednode, -addnode 2594 // If none of those are available, we fallback on to fixed seeds immediately, else we allow 2595 // 60 seconds for any of those sources to populate addrman. 2596 bool add_fixed_seeds_now = false; 2597 // It is cheapest to check if enough time has passed first. 2598 if (GetTime<std::chrono::seconds>() > start + std::chrono::minutes{1}) { 2599 add_fixed_seeds_now = true; 2600 LogPrintf("Adding fixed seeds as 60 seconds have passed and addrman is empty for at least one reachable network\n"); 2601 } 2602 2603 // Perform cheap checks before locking a mutex. 2604 else if (!dnsseed && !use_seednodes) { 2605 LOCK(m_added_nodes_mutex); 2606 if (m_added_node_params.empty()) { 2607 add_fixed_seeds_now = true; 2608 LogPrintf("Adding fixed seeds as -dnsseed=0 (or IPv4/IPv6 connections are disabled via -onlynet) and neither -addnode nor -seednode are provided\n"); 2609 } 2610 } 2611 2612 if (add_fixed_seeds_now) { 2613 std::vector<CAddress> seed_addrs{ConvertSeeds(m_params.FixedSeeds())}; 2614 // We will not make outgoing connections to peers that are unreachable 2615 // (e.g. because of -onlynet configuration). 2616 // Therefore, we do not add them to addrman in the first place. 2617 // In case previously unreachable networks become reachable 2618 // (e.g. in case of -onlynet changes by the user), fixed seeds will 2619 // be loaded only for networks for which we have no addresses. 2620 seed_addrs.erase(std::remove_if(seed_addrs.begin(), seed_addrs.end(), 2621 [&fixed_seed_networks](const CAddress& addr) { return fixed_seed_networks.count(addr.GetNetwork()) == 0; }), 2622 seed_addrs.end()); 2623 CNetAddr local; 2624 local.SetInternal("fixedseeds"); 2625 addrman.Add(seed_addrs, local); 2626 add_fixed_seeds = false; 2627 LogPrintf("Added %d fixed seeds from reachable networks.\n", seed_addrs.size()); 2628 } 2629 } 2630 2631 // 2632 // Choose an address to connect to based on most recently seen 2633 // 2634 CAddress addrConnect; 2635 2636 // Only connect out to one peer per ipv4/ipv6 network group (/16 for IPv4). 2637 int nOutboundFullRelay = 0; 2638 int nOutboundBlockRelay = 0; 2639 int outbound_privacy_network_peers = 0; 2640 std::set<std::vector<unsigned char>> outbound_ipv46_peer_netgroups; 2641 2642 { 2643 LOCK(m_nodes_mutex); 2644 for (const CNode* pnode : m_nodes) { 2645 if (pnode->IsFullOutboundConn()) nOutboundFullRelay++; 2646 if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++; 2647 2648 // Make sure our persistent outbound slots to ipv4/ipv6 peers belong to different netgroups. 2649 switch (pnode->m_conn_type) { 2650 // We currently don't take inbound connections into account. Since they are 2651 // free to make, an attacker could make them to prevent us from connecting to 2652 // certain peers. 2653 case ConnectionType::INBOUND: 2654 // Short-lived outbound connections should not affect how we select outbound 2655 // peers from addrman. 2656 case ConnectionType::ADDR_FETCH: 2657 case ConnectionType::FEELER: 2658 break; 2659 case ConnectionType::MANUAL: 2660 case ConnectionType::OUTBOUND_FULL_RELAY: 2661 case ConnectionType::BLOCK_RELAY: 2662 const CAddress address{pnode->addr}; 2663 if (address.IsTor() || address.IsI2P() || address.IsCJDNS()) { 2664 // Since our addrman-groups for these networks are 2665 // random, without relation to the route we 2666 // take to connect to these peers or to the 2667 // difficulty in obtaining addresses with diverse 2668 // groups, we don't worry about diversity with 2669 // respect to our addrman groups when connecting to 2670 // these networks. 2671 ++outbound_privacy_network_peers; 2672 } else { 2673 outbound_ipv46_peer_netgroups.insert(m_netgroupman.GetGroup(address)); 2674 } 2675 } // no default case, so the compiler can warn about missing cases 2676 } 2677 } 2678 2679 if (!seed_nodes.empty() && nOutboundFullRelay < SEED_OUTBOUND_CONNECTION_THRESHOLD) { 2680 if (NodeClock::now() > seed_node_timer + ADD_NEXT_SEEDNODE) { 2681 seed_node_timer = NodeClock::now(); 2682 add_addr_fetch = true; 2683 } 2684 } 2685 2686 ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY; 2687 auto now = GetTime<std::chrono::microseconds>(); 2688 bool anchor = false; 2689 bool fFeeler = false; 2690 std::optional<Network> preferred_net; 2691 2692 // Determine what type of connection to open. Opening 2693 // BLOCK_RELAY connections to addresses from anchors.dat gets the highest 2694 // priority. Then we open OUTBOUND_FULL_RELAY priority until we 2695 // meet our full-relay capacity. Then we open BLOCK_RELAY connection 2696 // until we hit our block-relay-only peer limit. 2697 // GetTryNewOutboundPeer() gets set when a stale tip is detected, so we 2698 // try opening an additional OUTBOUND_FULL_RELAY connection. If none of 2699 // these conditions are met, check to see if it's time to try an extra 2700 // block-relay-only peer (to confirm our tip is current, see below) or the next_feeler 2701 // timer to decide if we should open a FEELER. 2702 2703 if (!m_anchors.empty() && (nOutboundBlockRelay < m_max_outbound_block_relay)) { 2704 conn_type = ConnectionType::BLOCK_RELAY; 2705 anchor = true; 2706 } else if (nOutboundFullRelay < m_max_outbound_full_relay) { 2707 // OUTBOUND_FULL_RELAY 2708 } else if (nOutboundBlockRelay < m_max_outbound_block_relay) { 2709 conn_type = ConnectionType::BLOCK_RELAY; 2710 } else if (GetTryNewOutboundPeer()) { 2711 // OUTBOUND_FULL_RELAY 2712 } else if (now > next_extra_block_relay && m_start_extra_block_relay_peers) { 2713 // Periodically connect to a peer (using regular outbound selection 2714 // methodology from addrman) and stay connected long enough to sync 2715 // headers, but not much else. 2716 // 2717 // Then disconnect the peer, if we haven't learned anything new. 2718 // 2719 // The idea is to make eclipse attacks very difficult to pull off, 2720 // because every few minutes we're finding a new peer to learn headers 2721 // from. 2722 // 2723 // This is similar to the logic for trying extra outbound (full-relay) 2724 // peers, except: 2725 // - we do this all the time on an exponential timer, rather than just when 2726 // our tip is stale 2727 // - we potentially disconnect our next-youngest block-relay-only peer, if our 2728 // newest block-relay-only peer delivers a block more recently. 2729 // See the eviction logic in net_processing.cpp. 2730 // 2731 // Because we can promote these connections to block-relay-only 2732 // connections, they do not get their own ConnectionType enum 2733 // (similar to how we deal with extra outbound peers). 2734 next_extra_block_relay = now + rng.rand_exp_duration(EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); 2735 conn_type = ConnectionType::BLOCK_RELAY; 2736 } else if (now > next_feeler) { 2737 next_feeler = now + rng.rand_exp_duration(FEELER_INTERVAL); 2738 conn_type = ConnectionType::FEELER; 2739 fFeeler = true; 2740 } else if (nOutboundFullRelay == m_max_outbound_full_relay && 2741 m_max_outbound_full_relay == MAX_OUTBOUND_FULL_RELAY_CONNECTIONS && 2742 now > next_extra_network_peer && 2743 MaybePickPreferredNetwork(preferred_net)) { 2744 // Full outbound connection management: Attempt to get at least one 2745 // outbound peer from each reachable network by making extra connections 2746 // and then protecting "only" peers from a network during outbound eviction. 2747 // This is not attempted if the user changed -maxconnections to a value 2748 // so low that less than MAX_OUTBOUND_FULL_RELAY_CONNECTIONS are made, 2749 // to prevent interactions with otherwise protected outbound peers. 2750 next_extra_network_peer = now + rng.rand_exp_duration(EXTRA_NETWORK_PEER_INTERVAL); 2751 } else { 2752 // skip to next iteration of while loop 2753 continue; 2754 } 2755 2756 addrman.ResolveCollisions(); 2757 2758 const auto current_time{NodeClock::now()}; 2759 int nTries = 0; 2760 const auto reachable_nets{g_reachable_nets.All()}; 2761 2762 while (!interruptNet) 2763 { 2764 if (anchor && !m_anchors.empty()) { 2765 const CAddress addr = m_anchors.back(); 2766 m_anchors.pop_back(); 2767 if (!addr.IsValid() || IsLocal(addr) || !g_reachable_nets.Contains(addr) || 2768 !m_msgproc->HasAllDesirableServiceFlags(addr.nServices) || 2769 outbound_ipv46_peer_netgroups.count(m_netgroupman.GetGroup(addr))) continue; 2770 addrConnect = addr; 2771 LogDebug(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToStringAddrPort()); 2772 break; 2773 } 2774 2775 // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman, 2776 // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates 2777 // already-connected network ranges, ...) before trying new addrman addresses. 2778 nTries++; 2779 if (nTries > 100) 2780 break; 2781 2782 CAddress addr; 2783 NodeSeconds addr_last_try{0s}; 2784 2785 if (fFeeler) { 2786 // First, try to get a tried table collision address. This returns 2787 // an empty (invalid) address if there are no collisions to try. 2788 std::tie(addr, addr_last_try) = addrman.SelectTriedCollision(); 2789 2790 if (!addr.IsValid()) { 2791 // No tried table collisions. Select a new table address 2792 // for our feeler. 2793 std::tie(addr, addr_last_try) = addrman.Select(true, reachable_nets); 2794 } else if (AlreadyConnectedToAddress(addr)) { 2795 // If test-before-evict logic would have us connect to a 2796 // peer that we're already connected to, just mark that 2797 // address as Good(). We won't be able to initiate the 2798 // connection anyway, so this avoids inadvertently evicting 2799 // a currently-connected peer. 2800 addrman.Good(addr); 2801 // Select a new table address for our feeler instead. 2802 std::tie(addr, addr_last_try) = addrman.Select(true, reachable_nets); 2803 } 2804 } else { 2805 // Not a feeler 2806 // If preferred_net has a value set, pick an extra outbound 2807 // peer from that network. The eviction logic in net_processing 2808 // ensures that a peer from another network will be evicted. 2809 std::tie(addr, addr_last_try) = preferred_net.has_value() 2810 ? addrman.Select(false, {*preferred_net}) 2811 : addrman.Select(false, reachable_nets); 2812 } 2813 2814 // Require outbound IPv4/IPv6 connections, other than feelers, to be to distinct network groups 2815 if (!fFeeler && outbound_ipv46_peer_netgroups.count(m_netgroupman.GetGroup(addr))) { 2816 continue; 2817 } 2818 2819 // if we selected an invalid or local address, restart 2820 if (!addr.IsValid() || IsLocal(addr)) { 2821 break; 2822 } 2823 2824 if (!g_reachable_nets.Contains(addr)) { 2825 continue; 2826 } 2827 2828 // only consider very recently tried nodes after 30 failed attempts 2829 if (current_time - addr_last_try < 10min && nTries < 30) { 2830 continue; 2831 } 2832 2833 // for non-feelers, require all the services we'll want, 2834 // for feelers, only require they be a full node (only because most 2835 // SPV clients don't have a good address DB available) 2836 if (!fFeeler && !m_msgproc->HasAllDesirableServiceFlags(addr.nServices)) { 2837 continue; 2838 } else if (fFeeler && !MayHaveUsefulAddressDB(addr.nServices)) { 2839 continue; 2840 } 2841 2842 // Do not connect to bad ports, unless 50 invalid addresses have been selected already. 2843 if (nTries < 50 && (addr.IsIPv4() || addr.IsIPv6()) && IsBadPort(addr.GetPort())) { 2844 continue; 2845 } 2846 2847 // Do not make automatic outbound connections to addnode peers, to 2848 // not use our limited outbound slots for them and to ensure 2849 // addnode connections benefit from their intended protections. 2850 if (AddedNodesContain(addr)) { 2851 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Not making automatic %s%s connection to %s peer selected for manual (addnode) connection%s\n", 2852 preferred_net.has_value() ? "network-specific " : "", 2853 ConnectionTypeAsString(conn_type), GetNetworkName(addr.GetNetwork()), 2854 fLogIPs ? strprintf(": %s", addr.ToStringAddrPort()) : ""); 2855 continue; 2856 } 2857 2858 addrConnect = addr; 2859 break; 2860 } 2861 2862 if (addrConnect.IsValid()) { 2863 if (fFeeler) { 2864 // Add small amount of random noise before connection to avoid synchronization. 2865 if (!interruptNet.sleep_for(rng.rand_uniform_duration<CThreadInterrupt::Clock>(FEELER_SLEEP_WINDOW))) { 2866 return; 2867 } 2868 LogDebug(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToStringAddrPort()); 2869 } 2870 2871 if (preferred_net != std::nullopt) LogDebug(BCLog::NET, "Making network specific connection to %s on %s.\n", addrConnect.ToStringAddrPort(), GetNetworkName(preferred_net.value())); 2872 2873 // Record addrman failure attempts when node has at least 2 persistent outbound connections to peers with 2874 // different netgroups in ipv4/ipv6 networks + all peers in Tor/I2P/CJDNS networks. 2875 // Don't record addrman failure attempts when node is offline. This can be identified since all local 2876 // network connections (if any) belong in the same netgroup, and the size of `outbound_ipv46_peer_netgroups` would only be 1. 2877 const bool count_failures{((int)outbound_ipv46_peer_netgroups.size() + outbound_privacy_network_peers) >= std::min(m_max_automatic_connections - 1, 2)}; 2878 // Use BIP324 transport when both us and them have NODE_V2_P2P set. 2879 const bool use_v2transport(addrConnect.nServices & GetLocalServices() & NODE_P2P_V2); 2880 OpenNetworkConnection(addrConnect, count_failures, std::move(grant), /*strDest=*/nullptr, conn_type, use_v2transport); 2881 } 2882 } 2883 } 2884 2885 std::vector<CAddress> CConnman::GetCurrentBlockRelayOnlyConns() const 2886 { 2887 std::vector<CAddress> ret; 2888 LOCK(m_nodes_mutex); 2889 for (const CNode* pnode : m_nodes) { 2890 if (pnode->IsBlockOnlyConn()) { 2891 ret.push_back(pnode->addr); 2892 } 2893 } 2894 2895 return ret; 2896 } 2897 2898 std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo(bool include_connected) const 2899 { 2900 std::vector<AddedNodeInfo> ret; 2901 2902 std::list<AddedNodeParams> lAddresses(0); 2903 { 2904 LOCK(m_added_nodes_mutex); 2905 ret.reserve(m_added_node_params.size()); 2906 std::copy(m_added_node_params.cbegin(), m_added_node_params.cend(), std::back_inserter(lAddresses)); 2907 } 2908 2909 2910 // Build a map of all already connected addresses (by IP:port and by name) to inbound/outbound and resolved CService 2911 std::map<CService, bool> mapConnected; 2912 std::map<std::string, std::pair<bool, CService>> mapConnectedByName; 2913 { 2914 LOCK(m_nodes_mutex); 2915 for (const CNode* pnode : m_nodes) { 2916 if (pnode->addr.IsValid()) { 2917 mapConnected[pnode->addr] = pnode->IsInboundConn(); 2918 } 2919 std::string addrName{pnode->m_addr_name}; 2920 if (!addrName.empty()) { 2921 mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->IsInboundConn(), static_cast<const CService&>(pnode->addr)); 2922 } 2923 } 2924 } 2925 2926 for (const auto& addr : lAddresses) { 2927 CService service{MaybeFlipIPv6toCJDNS(LookupNumeric(addr.m_added_node, GetDefaultPort(addr.m_added_node)))}; 2928 AddedNodeInfo addedNode{addr, CService(), false, false}; 2929 if (service.IsValid()) { 2930 // strAddNode is an IP:port 2931 auto it = mapConnected.find(service); 2932 if (it != mapConnected.end()) { 2933 if (!include_connected) { 2934 continue; 2935 } 2936 addedNode.resolvedAddress = service; 2937 addedNode.fConnected = true; 2938 addedNode.fInbound = it->second; 2939 } 2940 } else { 2941 // strAddNode is a name 2942 auto it = mapConnectedByName.find(addr.m_added_node); 2943 if (it != mapConnectedByName.end()) { 2944 if (!include_connected) { 2945 continue; 2946 } 2947 addedNode.resolvedAddress = it->second.second; 2948 addedNode.fConnected = true; 2949 addedNode.fInbound = it->second.first; 2950 } 2951 } 2952 ret.emplace_back(std::move(addedNode)); 2953 } 2954 2955 return ret; 2956 } 2957 2958 void CConnman::ThreadOpenAddedConnections() 2959 { 2960 AssertLockNotHeld(m_unused_i2p_sessions_mutex); 2961 AssertLockNotHeld(m_reconnections_mutex); 2962 while (true) 2963 { 2964 CSemaphoreGrant grant(*semAddnode); 2965 std::vector<AddedNodeInfo> vInfo = GetAddedNodeInfo(/*include_connected=*/false); 2966 bool tried = false; 2967 for (const AddedNodeInfo& info : vInfo) { 2968 if (!grant) { 2969 // If we've used up our semaphore and need a new one, let's not wait here since while we are waiting 2970 // the addednodeinfo state might change. 2971 break; 2972 } 2973 tried = true; 2974 CAddress addr(CService(), NODE_NONE); 2975 OpenNetworkConnection(addr, false, std::move(grant), info.m_params.m_added_node.c_str(), ConnectionType::MANUAL, info.m_params.m_use_v2transport); 2976 if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; 2977 grant = CSemaphoreGrant(*semAddnode, /*fTry=*/true); 2978 } 2979 // See if any reconnections are desired. 2980 PerformReconnections(); 2981 // Retry every 60 seconds if a connection was attempted, otherwise two seconds 2982 if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) 2983 return; 2984 } 2985 } 2986 2987 // if successful, this moves the passed grant to the constructed node 2988 void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant&& grant_outbound, const char *pszDest, ConnectionType conn_type, bool use_v2transport) 2989 { 2990 AssertLockNotHeld(m_unused_i2p_sessions_mutex); 2991 assert(conn_type != ConnectionType::INBOUND); 2992 2993 // 2994 // Initiate outbound network connection 2995 // 2996 if (interruptNet) { 2997 return; 2998 } 2999 if (!fNetworkActive) { 3000 return; 3001 } 3002 if (!pszDest) { 3003 bool banned_or_discouraged = m_banman && (m_banman->IsDiscouraged(addrConnect) || m_banman->IsBanned(addrConnect)); 3004 if (IsLocal(addrConnect) || banned_or_discouraged || AlreadyConnectedToAddress(addrConnect)) { 3005 return; 3006 } 3007 } else if (FindNode(std::string(pszDest))) 3008 return; 3009 3010 CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type, use_v2transport); 3011 3012 if (!pnode) 3013 return; 3014 pnode->grantOutbound = std::move(grant_outbound); 3015 3016 m_msgproc->InitializeNode(*pnode, m_local_services); 3017 { 3018 LOCK(m_nodes_mutex); 3019 m_nodes.push_back(pnode); 3020 3021 // update connection count by network 3022 if (pnode->IsManualOrFullOutboundConn()) ++m_network_conn_counts[pnode->addr.GetNetwork()]; 3023 } 3024 3025 TRACEPOINT(net, outbound_connection, 3026 pnode->GetId(), 3027 pnode->m_addr_name.c_str(), 3028 pnode->ConnectionTypeAsString().c_str(), 3029 pnode->ConnectedThroughNetwork(), 3030 GetNodeCount(ConnectionDirection::Out)); 3031 } 3032 3033 Mutex NetEventsInterface::g_msgproc_mutex; 3034 3035 void CConnman::ThreadMessageHandler() 3036 { 3037 LOCK(NetEventsInterface::g_msgproc_mutex); 3038 3039 while (!flagInterruptMsgProc) 3040 { 3041 bool fMoreWork = false; 3042 3043 { 3044 // Randomize the order in which we process messages from/to our peers. 3045 // This prevents attacks in which an attacker exploits having multiple 3046 // consecutive connections in the m_nodes list. 3047 const NodesSnapshot snap{*this, /*shuffle=*/true}; 3048 3049 for (CNode* pnode : snap.Nodes()) { 3050 if (pnode->fDisconnect) 3051 continue; 3052 3053 // Receive messages 3054 bool fMoreNodeWork = m_msgproc->ProcessMessages(pnode, flagInterruptMsgProc); 3055 fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend); 3056 if (flagInterruptMsgProc) 3057 return; 3058 // Send messages 3059 m_msgproc->SendMessages(pnode); 3060 3061 if (flagInterruptMsgProc) 3062 return; 3063 } 3064 } 3065 3066 WAIT_LOCK(mutexMsgProc, lock); 3067 if (!fMoreWork) { 3068 condMsgProc.wait_until(lock, std::chrono::steady_clock::now() + std::chrono::milliseconds(100), [this]() EXCLUSIVE_LOCKS_REQUIRED(mutexMsgProc) { return fMsgProcWake; }); 3069 } 3070 fMsgProcWake = false; 3071 } 3072 } 3073 3074 void CConnman::ThreadI2PAcceptIncoming() 3075 { 3076 static constexpr auto err_wait_begin = 1s; 3077 static constexpr auto err_wait_cap = 5min; 3078 auto err_wait = err_wait_begin; 3079 3080 bool advertising_listen_addr = false; 3081 i2p::Connection conn; 3082 3083 auto SleepOnFailure = [&]() { 3084 interruptNet.sleep_for(err_wait); 3085 if (err_wait < err_wait_cap) { 3086 err_wait += 1s; 3087 } 3088 }; 3089 3090 while (!interruptNet) { 3091 3092 if (!m_i2p_sam_session->Listen(conn)) { 3093 if (advertising_listen_addr && conn.me.IsValid()) { 3094 RemoveLocal(conn.me); 3095 advertising_listen_addr = false; 3096 } 3097 SleepOnFailure(); 3098 continue; 3099 } 3100 3101 if (!advertising_listen_addr) { 3102 AddLocal(conn.me, LOCAL_MANUAL); 3103 advertising_listen_addr = true; 3104 } 3105 3106 if (!m_i2p_sam_session->Accept(conn)) { 3107 SleepOnFailure(); 3108 continue; 3109 } 3110 3111 CreateNodeFromAcceptedSocket(std::move(conn.sock), NetPermissionFlags::None, conn.me, conn.peer); 3112 3113 err_wait = err_wait_begin; 3114 } 3115 } 3116 3117 bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, NetPermissionFlags permissions) 3118 { 3119 int nOne = 1; 3120 3121 // Create socket for listening for incoming connections 3122 struct sockaddr_storage sockaddr; 3123 socklen_t len = sizeof(sockaddr); 3124 if (!addrBind.GetSockAddr((struct sockaddr*)&sockaddr, &len)) 3125 { 3126 strError = Untranslated(strprintf("Bind address family for %s not supported", addrBind.ToStringAddrPort())); 3127 LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original); 3128 return false; 3129 } 3130 3131 std::unique_ptr<Sock> sock = CreateSock(addrBind.GetSAFamily(), SOCK_STREAM, IPPROTO_TCP); 3132 if (!sock) { 3133 strError = Untranslated(strprintf("Couldn't open socket for incoming connections (socket returned error %s)", NetworkErrorString(WSAGetLastError()))); 3134 LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original); 3135 return false; 3136 } 3137 3138 // Allow binding if the port is still in TIME_WAIT state after 3139 // the program was closed and restarted. 3140 if (sock->SetSockOpt(SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne, sizeof(int)) == SOCKET_ERROR) { 3141 strError = Untranslated(strprintf("Error setting SO_REUSEADDR on socket: %s, continuing anyway", NetworkErrorString(WSAGetLastError()))); 3142 LogPrintf("%s\n", strError.original); 3143 } 3144 3145 // some systems don't have IPV6_V6ONLY but are always v6only; others do have the option 3146 // and enable it by default or not. Try to enable it, if possible. 3147 if (addrBind.IsIPv6()) { 3148 #ifdef IPV6_V6ONLY 3149 if (sock->SetSockOpt(IPPROTO_IPV6, IPV6_V6ONLY, (sockopt_arg_type)&nOne, sizeof(int)) == SOCKET_ERROR) { 3150 strError = Untranslated(strprintf("Error setting IPV6_V6ONLY on socket: %s, continuing anyway", NetworkErrorString(WSAGetLastError()))); 3151 LogPrintf("%s\n", strError.original); 3152 } 3153 #endif 3154 #ifdef WIN32 3155 int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED; 3156 if (sock->SetSockOpt(IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char*)&nProtLevel, sizeof(int)) == SOCKET_ERROR) { 3157 strError = Untranslated(strprintf("Error setting IPV6_PROTECTION_LEVEL on socket: %s, continuing anyway", NetworkErrorString(WSAGetLastError()))); 3158 LogPrintf("%s\n", strError.original); 3159 } 3160 #endif 3161 } 3162 3163 if (sock->Bind(reinterpret_cast<struct sockaddr*>(&sockaddr), len) == SOCKET_ERROR) { 3164 int nErr = WSAGetLastError(); 3165 if (nErr == WSAEADDRINUSE) 3166 strError = strprintf(_("Unable to bind to %s on this computer. %s is probably already running."), addrBind.ToStringAddrPort(), CLIENT_NAME); 3167 else 3168 strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %s)"), addrBind.ToStringAddrPort(), NetworkErrorString(nErr)); 3169 LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original); 3170 return false; 3171 } 3172 LogPrintf("Bound to %s\n", addrBind.ToStringAddrPort()); 3173 3174 // Listen for incoming connections 3175 if (sock->Listen(SOMAXCONN) == SOCKET_ERROR) 3176 { 3177 strError = strprintf(_("Listening for incoming connections failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError())); 3178 LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original); 3179 return false; 3180 } 3181 3182 vhListenSocket.emplace_back(std::move(sock), permissions); 3183 return true; 3184 } 3185 3186 void Discover() 3187 { 3188 if (!fDiscover) 3189 return; 3190 3191 for (const CNetAddr &addr: GetLocalAddresses()) { 3192 if (AddLocal(addr, LOCAL_IF)) 3193 LogPrintf("%s: %s\n", __func__, addr.ToStringAddr()); 3194 } 3195 } 3196 3197 void CConnman::SetNetworkActive(bool active) 3198 { 3199 LogPrintf("%s: %s\n", __func__, active); 3200 3201 if (fNetworkActive == active) { 3202 return; 3203 } 3204 3205 fNetworkActive = active; 3206 3207 if (m_client_interface) { 3208 m_client_interface->NotifyNetworkActiveChanged(fNetworkActive); 3209 } 3210 } 3211 3212 CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In, AddrMan& addrman_in, 3213 const NetGroupManager& netgroupman, const CChainParams& params, bool network_active) 3214 : addrman(addrman_in) 3215 , m_netgroupman{netgroupman} 3216 , nSeed0(nSeed0In) 3217 , nSeed1(nSeed1In) 3218 , m_params(params) 3219 { 3220 SetTryNewOutboundPeer(false); 3221 3222 Options connOptions; 3223 Init(connOptions); 3224 SetNetworkActive(network_active); 3225 } 3226 3227 NodeId CConnman::GetNewNodeId() 3228 { 3229 return nLastNodeId.fetch_add(1, std::memory_order_relaxed); 3230 } 3231 3232 uint16_t CConnman::GetDefaultPort(Network net) const 3233 { 3234 return net == NET_I2P ? I2P_SAM31_PORT : m_params.GetDefaultPort(); 3235 } 3236 3237 uint16_t CConnman::GetDefaultPort(const std::string& addr) const 3238 { 3239 CNetAddr a; 3240 return a.SetSpecial(addr) ? GetDefaultPort(a.GetNetwork()) : m_params.GetDefaultPort(); 3241 } 3242 3243 bool CConnman::Bind(const CService& addr_, unsigned int flags, NetPermissionFlags permissions) 3244 { 3245 const CService addr{MaybeFlipIPv6toCJDNS(addr_)}; 3246 3247 bilingual_str strError; 3248 if (!BindListenPort(addr, strError, permissions)) { 3249 if ((flags & BF_REPORT_ERROR) && m_client_interface) { 3250 m_client_interface->ThreadSafeMessageBox(strError, "", CClientUIInterface::MSG_ERROR); 3251 } 3252 return false; 3253 } 3254 3255 if (addr.IsRoutable() && fDiscover && !(flags & BF_DONT_ADVERTISE) && !NetPermissions::HasFlag(permissions, NetPermissionFlags::NoBan)) { 3256 AddLocal(addr, LOCAL_BIND); 3257 } 3258 3259 return true; 3260 } 3261 3262 bool CConnman::InitBinds(const Options& options) 3263 { 3264 for (const auto& addrBind : options.vBinds) { 3265 if (!Bind(addrBind, BF_REPORT_ERROR, NetPermissionFlags::None)) { 3266 return false; 3267 } 3268 } 3269 for (const auto& addrBind : options.vWhiteBinds) { 3270 if (!Bind(addrBind.m_service, BF_REPORT_ERROR, addrBind.m_flags)) { 3271 return false; 3272 } 3273 } 3274 for (const auto& addr_bind : options.onion_binds) { 3275 if (!Bind(addr_bind, BF_REPORT_ERROR | BF_DONT_ADVERTISE, NetPermissionFlags::None)) { 3276 return false; 3277 } 3278 } 3279 if (options.bind_on_any) { 3280 // Don't consider errors to bind on IPv6 "::" fatal because the host OS 3281 // may not have IPv6 support and the user did not explicitly ask us to 3282 // bind on that. 3283 const CService ipv6_any{in6_addr(IN6ADDR_ANY_INIT), GetListenPort()}; // :: 3284 Bind(ipv6_any, BF_NONE, NetPermissionFlags::None); 3285 3286 struct in_addr inaddr_any; 3287 inaddr_any.s_addr = htonl(INADDR_ANY); 3288 const CService ipv4_any{inaddr_any, GetListenPort()}; // 0.0.0.0 3289 if (!Bind(ipv4_any, BF_REPORT_ERROR, NetPermissionFlags::None)) { 3290 return false; 3291 } 3292 } 3293 return true; 3294 } 3295 3296 bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) 3297 { 3298 AssertLockNotHeld(m_total_bytes_sent_mutex); 3299 Init(connOptions); 3300 3301 if (fListen && !InitBinds(connOptions)) { 3302 if (m_client_interface) { 3303 m_client_interface->ThreadSafeMessageBox( 3304 _("Failed to listen on any port. Use -listen=0 if you want this."), 3305 "", CClientUIInterface::MSG_ERROR); 3306 } 3307 return false; 3308 } 3309 3310 Proxy i2p_sam; 3311 if (GetProxy(NET_I2P, i2p_sam) && connOptions.m_i2p_accept_incoming) { 3312 m_i2p_sam_session = std::make_unique<i2p::sam::Session>(gArgs.GetDataDirNet() / "i2p_private_key", 3313 i2p_sam, &interruptNet); 3314 } 3315 3316 // Randomize the order in which we may query seednode to potentially prevent connecting to the same one every restart (and signal that we have restarted) 3317 std::vector<std::string> seed_nodes = connOptions.vSeedNodes; 3318 if (!seed_nodes.empty()) { 3319 std::shuffle(seed_nodes.begin(), seed_nodes.end(), FastRandomContext{}); 3320 } 3321 3322 if (m_use_addrman_outgoing) { 3323 // Load addresses from anchors.dat 3324 m_anchors = ReadAnchors(gArgs.GetDataDirNet() / ANCHORS_DATABASE_FILENAME); 3325 if (m_anchors.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) { 3326 m_anchors.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS); 3327 } 3328 LogPrintf("%i block-relay-only anchors will be tried for connections.\n", m_anchors.size()); 3329 } 3330 3331 if (m_client_interface) { 3332 m_client_interface->InitMessage(_("Starting network threads…")); 3333 } 3334 3335 fAddressesInitialized = true; 3336 3337 if (semOutbound == nullptr) { 3338 // initialize semaphore 3339 semOutbound = std::make_unique<CSemaphore>(std::min(m_max_automatic_outbound, m_max_automatic_connections)); 3340 } 3341 if (semAddnode == nullptr) { 3342 // initialize semaphore 3343 semAddnode = std::make_unique<CSemaphore>(m_max_addnode); 3344 } 3345 3346 // 3347 // Start threads 3348 // 3349 assert(m_msgproc); 3350 interruptNet.reset(); 3351 flagInterruptMsgProc = false; 3352 3353 { 3354 LOCK(mutexMsgProc); 3355 fMsgProcWake = false; 3356 } 3357 3358 // Send and receive from sockets, accept connections 3359 threadSocketHandler = std::thread(&util::TraceThread, "net", [this] { ThreadSocketHandler(); }); 3360 3361 if (!gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED)) 3362 LogPrintf("DNS seeding disabled\n"); 3363 else 3364 threadDNSAddressSeed = std::thread(&util::TraceThread, "dnsseed", [this] { ThreadDNSAddressSeed(); }); 3365 3366 // Initiate manual connections 3367 threadOpenAddedConnections = std::thread(&util::TraceThread, "addcon", [this] { ThreadOpenAddedConnections(); }); 3368 3369 if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) { 3370 if (m_client_interface) { 3371 m_client_interface->ThreadSafeMessageBox( 3372 _("Cannot provide specific connections and have addrman find outgoing connections at the same time."), 3373 "", CClientUIInterface::MSG_ERROR); 3374 } 3375 return false; 3376 } 3377 if (connOptions.m_use_addrman_outgoing || !connOptions.m_specified_outgoing.empty()) { 3378 threadOpenConnections = std::thread( 3379 &util::TraceThread, "opencon", 3380 [this, connect = connOptions.m_specified_outgoing, seed_nodes = std::move(seed_nodes)] { ThreadOpenConnections(connect, seed_nodes); }); 3381 } 3382 3383 // Process messages 3384 threadMessageHandler = std::thread(&util::TraceThread, "msghand", [this] { ThreadMessageHandler(); }); 3385 3386 if (m_i2p_sam_session) { 3387 threadI2PAcceptIncoming = 3388 std::thread(&util::TraceThread, "i2paccept", [this] { ThreadI2PAcceptIncoming(); }); 3389 } 3390 3391 // Dump network addresses 3392 scheduler.scheduleEvery([this] { DumpAddresses(); }, DUMP_PEERS_INTERVAL); 3393 3394 // Run the ASMap Health check once and then schedule it to run every 24h. 3395 if (m_netgroupman.UsingASMap()) { 3396 ASMapHealthCheck(); 3397 scheduler.scheduleEvery([this] { ASMapHealthCheck(); }, ASMAP_HEALTH_CHECK_INTERVAL); 3398 } 3399 3400 return true; 3401 } 3402 3403 class CNetCleanup 3404 { 3405 public: 3406 CNetCleanup() = default; 3407 3408 ~CNetCleanup() 3409 { 3410 #ifdef WIN32 3411 // Shutdown Windows Sockets 3412 WSACleanup(); 3413 #endif 3414 } 3415 }; 3416 static CNetCleanup instance_of_cnetcleanup; 3417 3418 void CConnman::Interrupt() 3419 { 3420 { 3421 LOCK(mutexMsgProc); 3422 flagInterruptMsgProc = true; 3423 } 3424 condMsgProc.notify_all(); 3425 3426 interruptNet(); 3427 g_socks5_interrupt(); 3428 3429 if (semOutbound) { 3430 for (int i=0; i<m_max_automatic_outbound; i++) { 3431 semOutbound->post(); 3432 } 3433 } 3434 3435 if (semAddnode) { 3436 for (int i=0; i<m_max_addnode; i++) { 3437 semAddnode->post(); 3438 } 3439 } 3440 } 3441 3442 void CConnman::StopThreads() 3443 { 3444 if (threadI2PAcceptIncoming.joinable()) { 3445 threadI2PAcceptIncoming.join(); 3446 } 3447 if (threadMessageHandler.joinable()) 3448 threadMessageHandler.join(); 3449 if (threadOpenConnections.joinable()) 3450 threadOpenConnections.join(); 3451 if (threadOpenAddedConnections.joinable()) 3452 threadOpenAddedConnections.join(); 3453 if (threadDNSAddressSeed.joinable()) 3454 threadDNSAddressSeed.join(); 3455 if (threadSocketHandler.joinable()) 3456 threadSocketHandler.join(); 3457 } 3458 3459 void CConnman::StopNodes() 3460 { 3461 if (fAddressesInitialized) { 3462 DumpAddresses(); 3463 fAddressesInitialized = false; 3464 3465 if (m_use_addrman_outgoing) { 3466 // Anchor connections are only dumped during clean shutdown. 3467 std::vector<CAddress> anchors_to_dump = GetCurrentBlockRelayOnlyConns(); 3468 if (anchors_to_dump.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) { 3469 anchors_to_dump.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS); 3470 } 3471 DumpAnchors(gArgs.GetDataDirNet() / ANCHORS_DATABASE_FILENAME, anchors_to_dump); 3472 } 3473 } 3474 3475 // Delete peer connections. 3476 std::vector<CNode*> nodes; 3477 WITH_LOCK(m_nodes_mutex, nodes.swap(m_nodes)); 3478 for (CNode* pnode : nodes) { 3479 LogDebug(BCLog::NET, "Stopping node, %s", pnode->DisconnectMsg(fLogIPs)); 3480 pnode->CloseSocketDisconnect(); 3481 DeleteNode(pnode); 3482 } 3483 3484 for (CNode* pnode : m_nodes_disconnected) { 3485 DeleteNode(pnode); 3486 } 3487 m_nodes_disconnected.clear(); 3488 vhListenSocket.clear(); 3489 semOutbound.reset(); 3490 semAddnode.reset(); 3491 } 3492 3493 void CConnman::DeleteNode(CNode* pnode) 3494 { 3495 assert(pnode); 3496 m_msgproc->FinalizeNode(*pnode); 3497 delete pnode; 3498 } 3499 3500 CConnman::~CConnman() 3501 { 3502 Interrupt(); 3503 Stop(); 3504 } 3505 3506 std::vector<CAddress> CConnman::GetAddresses(size_t max_addresses, size_t max_pct, std::optional<Network> network, const bool filtered) const 3507 { 3508 std::vector<CAddress> addresses = addrman.GetAddr(max_addresses, max_pct, network, filtered); 3509 if (m_banman) { 3510 addresses.erase(std::remove_if(addresses.begin(), addresses.end(), 3511 [this](const CAddress& addr){return m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr);}), 3512 addresses.end()); 3513 } 3514 return addresses; 3515 } 3516 3517 std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addresses, size_t max_pct) 3518 { 3519 auto local_socket_bytes = requestor.addrBind.GetAddrBytes(); 3520 uint64_t cache_id = GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE) 3521 .Write(requestor.ConnectedThroughNetwork()) 3522 .Write(local_socket_bytes) 3523 // For outbound connections, the port of the bound address is randomly 3524 // assigned by the OS and would therefore not be useful for seeding. 3525 .Write(requestor.IsInboundConn() ? requestor.addrBind.GetPort() : 0) 3526 .Finalize(); 3527 const auto current_time = GetTime<std::chrono::microseconds>(); 3528 auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{}); 3529 CachedAddrResponse& cache_entry = r.first->second; 3530 if (cache_entry.m_cache_entry_expiration < current_time) { // If emplace() added new one it has expiration 0. 3531 cache_entry.m_addrs_response_cache = GetAddresses(max_addresses, max_pct, /*network=*/std::nullopt); 3532 // Choosing a proper cache lifetime is a trade-off between the privacy leak minimization 3533 // and the usefulness of ADDR responses to honest users. 3534 // 3535 // Longer cache lifetime makes it more difficult for an attacker to scrape 3536 // enough AddrMan data to maliciously infer something useful. 3537 // By the time an attacker scraped enough AddrMan records, most of 3538 // the records should be old enough to not leak topology info by 3539 // e.g. analyzing real-time changes in timestamps. 3540 // 3541 // It takes only several hundred requests to scrape everything from an AddrMan containing 100,000 nodes, 3542 // so ~24 hours of cache lifetime indeed makes the data less inferable by the time 3543 // most of it could be scraped (considering that timestamps are updated via 3544 // ADDR self-announcements and when nodes communicate). 3545 // We also should be robust to those attacks which may not require scraping *full* victim's AddrMan 3546 // (because even several timestamps of the same handful of nodes may leak privacy). 3547 // 3548 // On the other hand, longer cache lifetime makes ADDR responses 3549 // outdated and less useful for an honest requestor, e.g. if most nodes 3550 // in the ADDR response are no longer active. 3551 // 3552 // However, the churn in the network is known to be rather low. Since we consider 3553 // nodes to be "terrible" (see IsTerrible()) if the timestamps are older than 30 days, 3554 // max. 24 hours of "penalty" due to cache shouldn't make any meaningful difference 3555 // in terms of the freshness of the response. 3556 cache_entry.m_cache_entry_expiration = current_time + 3557 21h + FastRandomContext().randrange<std::chrono::microseconds>(6h); 3558 } 3559 return cache_entry.m_addrs_response_cache; 3560 } 3561 3562 bool CConnman::AddNode(const AddedNodeParams& add) 3563 { 3564 const CService resolved(LookupNumeric(add.m_added_node, GetDefaultPort(add.m_added_node))); 3565 const bool resolved_is_valid{resolved.IsValid()}; 3566 3567 LOCK(m_added_nodes_mutex); 3568 for (const auto& it : m_added_node_params) { 3569 if (add.m_added_node == it.m_added_node || (resolved_is_valid && resolved == LookupNumeric(it.m_added_node, GetDefaultPort(it.m_added_node)))) return false; 3570 } 3571 3572 m_added_node_params.push_back(add); 3573 return true; 3574 } 3575 3576 bool CConnman::RemoveAddedNode(const std::string& strNode) 3577 { 3578 LOCK(m_added_nodes_mutex); 3579 for (auto it = m_added_node_params.begin(); it != m_added_node_params.end(); ++it) { 3580 if (strNode == it->m_added_node) { 3581 m_added_node_params.erase(it); 3582 return true; 3583 } 3584 } 3585 return false; 3586 } 3587 3588 bool CConnman::AddedNodesContain(const CAddress& addr) const 3589 { 3590 AssertLockNotHeld(m_added_nodes_mutex); 3591 const std::string addr_str{addr.ToStringAddr()}; 3592 const std::string addr_port_str{addr.ToStringAddrPort()}; 3593 LOCK(m_added_nodes_mutex); 3594 return (m_added_node_params.size() < 24 // bound the query to a reasonable limit 3595 && std::any_of(m_added_node_params.cbegin(), m_added_node_params.cend(), 3596 [&](const auto& p) { return p.m_added_node == addr_str || p.m_added_node == addr_port_str; })); 3597 } 3598 3599 size_t CConnman::GetNodeCount(ConnectionDirection flags) const 3600 { 3601 LOCK(m_nodes_mutex); 3602 if (flags == ConnectionDirection::Both) // Shortcut if we want total 3603 return m_nodes.size(); 3604 3605 int nNum = 0; 3606 for (const auto& pnode : m_nodes) { 3607 if (flags & (pnode->IsInboundConn() ? ConnectionDirection::In : ConnectionDirection::Out)) { 3608 nNum++; 3609 } 3610 } 3611 3612 return nNum; 3613 } 3614 3615 3616 std::map<CNetAddr, LocalServiceInfo> CConnman::getNetLocalAddresses() const 3617 { 3618 LOCK(g_maplocalhost_mutex); 3619 return mapLocalHost; 3620 } 3621 3622 uint32_t CConnman::GetMappedAS(const CNetAddr& addr) const 3623 { 3624 return m_netgroupman.GetMappedAS(addr); 3625 } 3626 3627 void CConnman::GetNodeStats(std::vector<CNodeStats>& vstats) const 3628 { 3629 vstats.clear(); 3630 LOCK(m_nodes_mutex); 3631 vstats.reserve(m_nodes.size()); 3632 for (CNode* pnode : m_nodes) { 3633 vstats.emplace_back(); 3634 pnode->CopyStats(vstats.back()); 3635 vstats.back().m_mapped_as = GetMappedAS(pnode->addr); 3636 } 3637 } 3638 3639 bool CConnman::DisconnectNode(const std::string& strNode) 3640 { 3641 LOCK(m_nodes_mutex); 3642 if (CNode* pnode = FindNode(strNode)) { 3643 LogDebug(BCLog::NET, "disconnect by address%s match, %s", (fLogIPs ? strprintf("=%s", strNode) : ""), pnode->DisconnectMsg(fLogIPs)); 3644 pnode->fDisconnect = true; 3645 return true; 3646 } 3647 return false; 3648 } 3649 3650 bool CConnman::DisconnectNode(const CSubNet& subnet) 3651 { 3652 bool disconnected = false; 3653 LOCK(m_nodes_mutex); 3654 for (CNode* pnode : m_nodes) { 3655 if (subnet.Match(pnode->addr)) { 3656 LogDebug(BCLog::NET, "disconnect by subnet%s match, %s", (fLogIPs ? strprintf("=%s", subnet.ToString()) : ""), pnode->DisconnectMsg(fLogIPs)); 3657 pnode->fDisconnect = true; 3658 disconnected = true; 3659 } 3660 } 3661 return disconnected; 3662 } 3663 3664 bool CConnman::DisconnectNode(const CNetAddr& addr) 3665 { 3666 return DisconnectNode(CSubNet(addr)); 3667 } 3668 3669 bool CConnman::DisconnectNode(NodeId id) 3670 { 3671 LOCK(m_nodes_mutex); 3672 for(CNode* pnode : m_nodes) { 3673 if (id == pnode->GetId()) { 3674 LogDebug(BCLog::NET, "disconnect by id, %s", pnode->DisconnectMsg(fLogIPs)); 3675 pnode->fDisconnect = true; 3676 return true; 3677 } 3678 } 3679 return false; 3680 } 3681 3682 void CConnman::RecordBytesRecv(uint64_t bytes) 3683 { 3684 nTotalBytesRecv += bytes; 3685 } 3686 3687 void CConnman::RecordBytesSent(uint64_t bytes) 3688 { 3689 AssertLockNotHeld(m_total_bytes_sent_mutex); 3690 LOCK(m_total_bytes_sent_mutex); 3691 3692 nTotalBytesSent += bytes; 3693 3694 const auto now = GetTime<std::chrono::seconds>(); 3695 if (nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME < now) 3696 { 3697 // timeframe expired, reset cycle 3698 nMaxOutboundCycleStartTime = now; 3699 nMaxOutboundTotalBytesSentInCycle = 0; 3700 } 3701 3702 nMaxOutboundTotalBytesSentInCycle += bytes; 3703 } 3704 3705 uint64_t CConnman::GetMaxOutboundTarget() const 3706 { 3707 AssertLockNotHeld(m_total_bytes_sent_mutex); 3708 LOCK(m_total_bytes_sent_mutex); 3709 return nMaxOutboundLimit; 3710 } 3711 3712 std::chrono::seconds CConnman::GetMaxOutboundTimeframe() const 3713 { 3714 return MAX_UPLOAD_TIMEFRAME; 3715 } 3716 3717 std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle() const 3718 { 3719 AssertLockNotHeld(m_total_bytes_sent_mutex); 3720 LOCK(m_total_bytes_sent_mutex); 3721 return GetMaxOutboundTimeLeftInCycle_(); 3722 } 3723 3724 std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle_() const 3725 { 3726 AssertLockHeld(m_total_bytes_sent_mutex); 3727 3728 if (nMaxOutboundLimit == 0) 3729 return 0s; 3730 3731 if (nMaxOutboundCycleStartTime.count() == 0) 3732 return MAX_UPLOAD_TIMEFRAME; 3733 3734 const std::chrono::seconds cycleEndTime = nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME; 3735 const auto now = GetTime<std::chrono::seconds>(); 3736 return (cycleEndTime < now) ? 0s : cycleEndTime - now; 3737 } 3738 3739 bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) const 3740 { 3741 AssertLockNotHeld(m_total_bytes_sent_mutex); 3742 LOCK(m_total_bytes_sent_mutex); 3743 if (nMaxOutboundLimit == 0) 3744 return false; 3745 3746 if (historicalBlockServingLimit) 3747 { 3748 // keep a large enough buffer to at least relay each block once 3749 const std::chrono::seconds timeLeftInCycle = GetMaxOutboundTimeLeftInCycle_(); 3750 const uint64_t buffer = timeLeftInCycle / std::chrono::minutes{10} * MAX_BLOCK_SERIALIZED_SIZE; 3751 if (buffer >= nMaxOutboundLimit || nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer) 3752 return true; 3753 } 3754 else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) 3755 return true; 3756 3757 return false; 3758 } 3759 3760 uint64_t CConnman::GetOutboundTargetBytesLeft() const 3761 { 3762 AssertLockNotHeld(m_total_bytes_sent_mutex); 3763 LOCK(m_total_bytes_sent_mutex); 3764 if (nMaxOutboundLimit == 0) 3765 return 0; 3766 3767 return (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) ? 0 : nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle; 3768 } 3769 3770 uint64_t CConnman::GetTotalBytesRecv() const 3771 { 3772 return nTotalBytesRecv; 3773 } 3774 3775 uint64_t CConnman::GetTotalBytesSent() const 3776 { 3777 AssertLockNotHeld(m_total_bytes_sent_mutex); 3778 LOCK(m_total_bytes_sent_mutex); 3779 return nTotalBytesSent; 3780 } 3781 3782 ServiceFlags CConnman::GetLocalServices() const 3783 { 3784 return m_local_services; 3785 } 3786 3787 static std::unique_ptr<Transport> MakeTransport(NodeId id, bool use_v2transport, bool inbound) noexcept 3788 { 3789 if (use_v2transport) { 3790 return std::make_unique<V2Transport>(id, /*initiating=*/!inbound); 3791 } else { 3792 return std::make_unique<V1Transport>(id); 3793 } 3794 } 3795 3796 CNode::CNode(NodeId idIn, 3797 std::shared_ptr<Sock> sock, 3798 const CAddress& addrIn, 3799 uint64_t nKeyedNetGroupIn, 3800 uint64_t nLocalHostNonceIn, 3801 const CService& addrBindIn, 3802 const std::string& addrNameIn, 3803 ConnectionType conn_type_in, 3804 bool inbound_onion, 3805 CNodeOptions&& node_opts) 3806 : m_transport{MakeTransport(idIn, node_opts.use_v2transport, conn_type_in == ConnectionType::INBOUND)}, 3807 m_permission_flags{node_opts.permission_flags}, 3808 m_sock{sock}, 3809 m_connected{GetTime<std::chrono::seconds>()}, 3810 addr{addrIn}, 3811 addrBind{addrBindIn}, 3812 m_addr_name{addrNameIn.empty() ? addr.ToStringAddrPort() : addrNameIn}, 3813 m_dest(addrNameIn), 3814 m_inbound_onion{inbound_onion}, 3815 m_prefer_evict{node_opts.prefer_evict}, 3816 nKeyedNetGroup{nKeyedNetGroupIn}, 3817 m_conn_type{conn_type_in}, 3818 id{idIn}, 3819 nLocalHostNonce{nLocalHostNonceIn}, 3820 m_recv_flood_size{node_opts.recv_flood_size}, 3821 m_i2p_sam_session{std::move(node_opts.i2p_sam_session)} 3822 { 3823 if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND); 3824 3825 for (const auto& msg : ALL_NET_MESSAGE_TYPES) { 3826 mapRecvBytesPerMsgType[msg] = 0; 3827 } 3828 mapRecvBytesPerMsgType[NET_MESSAGE_TYPE_OTHER] = 0; 3829 3830 if (fLogIPs) { 3831 LogDebug(BCLog::NET, "Added connection to %s peer=%d\n", m_addr_name, id); 3832 } else { 3833 LogDebug(BCLog::NET, "Added connection peer=%d\n", id); 3834 } 3835 } 3836 3837 void CNode::MarkReceivedMsgsForProcessing() 3838 { 3839 AssertLockNotHeld(m_msg_process_queue_mutex); 3840 3841 size_t nSizeAdded = 0; 3842 for (const auto& msg : vRecvMsg) { 3843 // vRecvMsg contains only completed CNetMessage 3844 // the single possible partially deserialized message are held by TransportDeserializer 3845 nSizeAdded += msg.GetMemoryUsage(); 3846 } 3847 3848 LOCK(m_msg_process_queue_mutex); 3849 m_msg_process_queue.splice(m_msg_process_queue.end(), vRecvMsg); 3850 m_msg_process_queue_size += nSizeAdded; 3851 fPauseRecv = m_msg_process_queue_size > m_recv_flood_size; 3852 } 3853 3854 std::optional<std::pair<CNetMessage, bool>> CNode::PollMessage() 3855 { 3856 LOCK(m_msg_process_queue_mutex); 3857 if (m_msg_process_queue.empty()) return std::nullopt; 3858 3859 std::list<CNetMessage> msgs; 3860 // Just take one message 3861 msgs.splice(msgs.begin(), m_msg_process_queue, m_msg_process_queue.begin()); 3862 m_msg_process_queue_size -= msgs.front().GetMemoryUsage(); 3863 fPauseRecv = m_msg_process_queue_size > m_recv_flood_size; 3864 3865 return std::make_pair(std::move(msgs.front()), !m_msg_process_queue.empty()); 3866 } 3867 3868 bool CConnman::NodeFullyConnected(const CNode* pnode) 3869 { 3870 return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect; 3871 } 3872 3873 void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) 3874 { 3875 AssertLockNotHeld(m_total_bytes_sent_mutex); 3876 size_t nMessageSize = msg.data.size(); 3877 LogDebug(BCLog::NET, "sending %s (%d bytes) peer=%d\n", msg.m_type, nMessageSize, pnode->GetId()); 3878 if (gArgs.GetBoolArg("-capturemessages", false)) { 3879 CaptureMessage(pnode->addr, msg.m_type, msg.data, /*is_incoming=*/false); 3880 } 3881 3882 TRACEPOINT(net, outbound_message, 3883 pnode->GetId(), 3884 pnode->m_addr_name.c_str(), 3885 pnode->ConnectionTypeAsString().c_str(), 3886 msg.m_type.c_str(), 3887 msg.data.size(), 3888 msg.data.data() 3889 ); 3890 3891 size_t nBytesSent = 0; 3892 { 3893 LOCK(pnode->cs_vSend); 3894 // Check if the transport still has unsent bytes, and indicate to it that we're about to 3895 // give it a message to send. 3896 const auto& [to_send, more, _msg_type] = 3897 pnode->m_transport->GetBytesToSend(/*have_next_message=*/true); 3898 const bool queue_was_empty{to_send.empty() && pnode->vSendMsg.empty()}; 3899 3900 // Update memory usage of send buffer. 3901 pnode->m_send_memusage += msg.GetMemoryUsage(); 3902 if (pnode->m_send_memusage + pnode->m_transport->GetSendMemoryUsage() > nSendBufferMaxSize) pnode->fPauseSend = true; 3903 // Move message to vSendMsg queue. 3904 pnode->vSendMsg.push_back(std::move(msg)); 3905 3906 // If there was nothing to send before, and there is now (predicted by the "more" value 3907 // returned by the GetBytesToSend call above), attempt "optimistic write": 3908 // because the poll/select loop may pause for SELECT_TIMEOUT_MILLISECONDS before actually 3909 // doing a send, try sending from the calling thread if the queue was empty before. 3910 // With a V1Transport, more will always be true here, because adding a message always 3911 // results in sendable bytes there, but with V2Transport this is not the case (it may 3912 // still be in the handshake). 3913 if (queue_was_empty && more) { 3914 std::tie(nBytesSent, std::ignore) = SocketSendData(*pnode); 3915 } 3916 } 3917 if (nBytesSent) RecordBytesSent(nBytesSent); 3918 } 3919 3920 bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func) 3921 { 3922 CNode* found = nullptr; 3923 LOCK(m_nodes_mutex); 3924 for (auto&& pnode : m_nodes) { 3925 if(pnode->GetId() == id) { 3926 found = pnode; 3927 break; 3928 } 3929 } 3930 return found != nullptr && NodeFullyConnected(found) && func(found); 3931 } 3932 3933 CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const 3934 { 3935 return CSipHasher(nSeed0, nSeed1).Write(id); 3936 } 3937 3938 uint64_t CConnman::CalculateKeyedNetGroup(const CNetAddr& address) const 3939 { 3940 std::vector<unsigned char> vchNetGroup(m_netgroupman.GetGroup(address)); 3941 3942 return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup).Finalize(); 3943 } 3944 3945 void CConnman::PerformReconnections() 3946 { 3947 AssertLockNotHeld(m_reconnections_mutex); 3948 AssertLockNotHeld(m_unused_i2p_sessions_mutex); 3949 while (true) { 3950 // Move first element of m_reconnections to todo (avoiding an allocation inside the lock). 3951 decltype(m_reconnections) todo; 3952 { 3953 LOCK(m_reconnections_mutex); 3954 if (m_reconnections.empty()) break; 3955 todo.splice(todo.end(), m_reconnections, m_reconnections.begin()); 3956 } 3957 3958 auto& item = *todo.begin(); 3959 OpenNetworkConnection(item.addr_connect, 3960 // We only reconnect if the first attempt to connect succeeded at 3961 // connection time, but then failed after the CNode object was 3962 // created. Since we already know connecting is possible, do not 3963 // count failure to reconnect. 3964 /*fCountFailure=*/false, 3965 std::move(item.grant), 3966 item.destination.empty() ? nullptr : item.destination.c_str(), 3967 item.conn_type, 3968 item.use_v2transport); 3969 } 3970 } 3971 3972 void CConnman::ASMapHealthCheck() 3973 { 3974 const std::vector<CAddress> v4_addrs{GetAddresses(/*max_addresses=*/ 0, /*max_pct=*/ 0, Network::NET_IPV4, /*filtered=*/ false)}; 3975 const std::vector<CAddress> v6_addrs{GetAddresses(/*max_addresses=*/ 0, /*max_pct=*/ 0, Network::NET_IPV6, /*filtered=*/ false)}; 3976 std::vector<CNetAddr> clearnet_addrs; 3977 clearnet_addrs.reserve(v4_addrs.size() + v6_addrs.size()); 3978 std::transform(v4_addrs.begin(), v4_addrs.end(), std::back_inserter(clearnet_addrs), 3979 [](const CAddress& addr) { return static_cast<CNetAddr>(addr); }); 3980 std::transform(v6_addrs.begin(), v6_addrs.end(), std::back_inserter(clearnet_addrs), 3981 [](const CAddress& addr) { return static_cast<CNetAddr>(addr); }); 3982 m_netgroupman.ASMapHealthCheck(clearnet_addrs); 3983 } 3984 3985 // Dump binary message to file, with timestamp. 3986 static void CaptureMessageToFile(const CAddress& addr, 3987 const std::string& msg_type, 3988 std::span<const unsigned char> data, 3989 bool is_incoming) 3990 { 3991 // Note: This function captures the message at the time of processing, 3992 // not at socket receive/send time. 3993 // This ensures that the messages are always in order from an application 3994 // layer (processing) perspective. 3995 auto now = GetTime<std::chrono::microseconds>(); 3996 3997 // Windows folder names cannot include a colon 3998 std::string clean_addr = addr.ToStringAddrPort(); 3999 std::replace(clean_addr.begin(), clean_addr.end(), ':', '_'); 4000 4001 fs::path base_path = gArgs.GetDataDirNet() / "message_capture" / fs::u8path(clean_addr); 4002 fs::create_directories(base_path); 4003 4004 fs::path path = base_path / (is_incoming ? "msgs_recv.dat" : "msgs_sent.dat"); 4005 AutoFile f{fsbridge::fopen(path, "ab")}; 4006 4007 ser_writedata64(f, now.count()); 4008 f << std::span{msg_type}; 4009 for (auto i = msg_type.length(); i < CMessageHeader::MESSAGE_TYPE_SIZE; ++i) { 4010 f << uint8_t{'\0'}; 4011 } 4012 uint32_t size = data.size(); 4013 ser_writedata32(f, size); 4014 f << data; 4015 } 4016 4017 std::function<void(const CAddress& addr, 4018 const std::string& msg_type, 4019 std::span<const unsigned char> data, 4020 bool is_incoming)> 4021 CaptureMessage = CaptureMessageToFile;