// src/test/fuzz/util/net.cpp
  1  // Copyright (c) 2009-present The Bitcoin Core developers
  2  // Distributed under the MIT software license, see the accompanying
  3  // file COPYING or http://www.opensource.org/licenses/mit-license.php.
  4  
  5  #include <test/fuzz/util/net.h>
  6  
  7  #include <compat/compat.h>
  8  #include <netaddress.h>
  9  #include <node/protocol_version.h>
 10  #include <protocol.h>
 11  #include <test/fuzz/FuzzedDataProvider.h>
 12  #include <test/fuzz/util.h>
 13  #include <test/util/net.h>
 14  #include <util/sock.h>
 15  #include <util/time.h>
 16  
 17  #include <array>
 18  #include <cassert>
 19  #include <cerrno>
 20  #include <cstdint>
 21  #include <cstdlib>
 22  #include <cstring>
 23  #include <ranges>
 24  #include <thread>
 25  #include <vector>
 26  
 27  class CNode;
 28  
// Generate a CNetAddr on a fuzzer-chosen network. Randomness comes from
// `fuzzed_data_provider` when `rand == nullptr`, otherwise from `rand`
// (useful when the fuzz input should not be spent on address bytes).
CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider, FastRandomContext* rand) noexcept
{
    // Per-network info needed to build the BIP155 (addrv2) serialization:
    // the BIP155 network id byte and the expected address payload length.
    struct NetAux {
        Network net;
        CNetAddr::BIP155Network bip155;
        size_t len;
    };

    // NET_INTERNAL has no BIP155 representation, so its bip155/len entries
    // are dummies; it is handled separately via SetInternal() below.
    static constexpr std::array<NetAux, 6> nets{
        NetAux{.net = Network::NET_IPV4, .bip155 = CNetAddr::BIP155Network::IPV4, .len = ADDR_IPV4_SIZE},
        NetAux{.net = Network::NET_IPV6, .bip155 = CNetAddr::BIP155Network::IPV6, .len = ADDR_IPV6_SIZE},
        NetAux{.net = Network::NET_ONION, .bip155 = CNetAddr::BIP155Network::TORV3, .len = ADDR_TORV3_SIZE},
        NetAux{.net = Network::NET_I2P, .bip155 = CNetAddr::BIP155Network::I2P, .len = ADDR_I2P_SIZE},
        NetAux{.net = Network::NET_CJDNS, .bip155 = CNetAddr::BIP155Network::CJDNS, .len = ADDR_CJDNS_SIZE},
        NetAux{.net = Network::NET_INTERNAL, .bip155 = CNetAddr::BIP155Network{0}, .len = 0},
    };

    // Pick which network to generate, from whichever randomness source is active.
    const size_t nets_index{rand == nullptr
        ? fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, nets.size() - 1)
        : static_cast<size_t>(rand->randrange(nets.size()))};

    const auto& aux = nets[nets_index];

    CNetAddr addr;

    // Internal addresses are derived from a 32-byte name, not deserialized
    // from a BIP155 payload, so short-circuit here.
    if (aux.net == Network::NET_INTERNAL) {
        if (rand == nullptr) {
            addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32));
        } else {
            const auto v = rand->randbytes(32);
            addr.SetInternal(std::string{v.begin(), v.end()});
        }
        return addr;
    }

    DataStream s;

    // BIP155 serialization starts with the network id byte...
    s << static_cast<uint8_t>(aux.bip155);

    // ...followed by exactly aux.len address bytes.
    std::vector<uint8_t> addr_bytes;
    if (rand == nullptr) {
        addr_bytes = fuzzed_data_provider.ConsumeBytes<uint8_t>(aux.len);
        addr_bytes.resize(aux.len); // pad with zeros if the fuzz input ran short
    } else {
        addr_bytes = rand->randbytes(aux.len);
    }
    if (aux.net == NET_IPV6 && addr_bytes[0] == CJDNS_PREFIX) { // Avoid generating IPv6 addresses that look like CJDNS.
        addr_bytes[0] = 0x55; // Just an arbitrary number, anything != CJDNS_PREFIX would do.
    }
    if (aux.net == NET_CJDNS) { // Avoid generating CJDNS addresses that don't start with CJDNS_PREFIX because those are !IsValid().
        addr_bytes[0] = CJDNS_PREFIX;
    }
    s << addr_bytes;

    // Round-trip through the V2 (addrv2) deserializer to populate `addr`.
    s >> CAddress::V2_NETWORK(addr);

    return addr;
}
 87  
 88  CAddress ConsumeAddress(FuzzedDataProvider& fuzzed_data_provider) noexcept
 89  {
 90      return {ConsumeService(fuzzed_data_provider), ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS), NodeSeconds{std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<uint32_t>()}}};
 91  }
 92  
 93  template <typename P>
 94  P ConsumeDeserializationParams(FuzzedDataProvider& fuzzed_data_provider) noexcept
 95  {
 96      constexpr std::array ADDR_ENCODINGS{
 97          CNetAddr::Encoding::V1,
 98          CNetAddr::Encoding::V2,
 99      };
100      constexpr std::array ADDR_FORMATS{
101          CAddress::Format::Disk,
102          CAddress::Format::Network,
103      };
104      if constexpr (std::is_same_v<P, CNetAddr::SerParams>) {
105          return P{PickValue(fuzzed_data_provider, ADDR_ENCODINGS)};
106      }
107      if constexpr (std::is_same_v<P, CAddress::SerParams>) {
108          return P{{PickValue(fuzzed_data_provider, ADDR_ENCODINGS)}, PickValue(fuzzed_data_provider, ADDR_FORMATS)};
109      }
110  }
111  template CNetAddr::SerParams ConsumeDeserializationParams(FuzzedDataProvider&) noexcept;
112  template CAddress::SerParams ConsumeDeserializationParams(FuzzedDataProvider&) noexcept;
113  
// Construct a fake socket whose descriptor is deliberately a very high value
// (INVALID_SOCKET or INVALID_SOCKET - 1) so it cannot collide with a real fd
// in common cases; see ~FuzzedSock() for why close() must still be avoided.
FuzzedSock::FuzzedSock(FuzzedDataProvider& fuzzed_data_provider)
    : Sock{fuzzed_data_provider.ConsumeIntegralInRange<SOCKET>(INVALID_SOCKET - 1, INVALID_SOCKET)},
      m_fuzzed_data_provider{fuzzed_data_provider},
      m_selectable{fuzzed_data_provider.ConsumeBool()},
      m_time{MockableSteadyClock::INITIAL_MOCK_TIME}
{
    ElapseTime(std::chrono::seconds(0)); // start mocking the steady clock.
}
122  
FuzzedSock::~FuzzedSock()
{
    // Sock::~Sock() will be called after FuzzedSock::~FuzzedSock() and it will call
    // close(m_socket) if m_socket is not INVALID_SOCKET.
    // Avoid closing an arbitrary file descriptor (m_socket is just a random very high number which
    // theoretically may concide with a real opened file descriptor).
    m_socket = INVALID_SOCKET;
}
131  
// Advance the mocked steady clock by `duration`. Used to simulate the
// passage of time (e.g. Wait() timeouts) without actually sleeping.
void FuzzedSock::ElapseTime(std::chrono::milliseconds duration) const
{
    m_time += duration;
    MockableSteadyClock::SetMockTime(m_time);
}
137  
// Deliberately unusable: a FuzzedSock must keep its fuzz-provider reference
// and fake descriptor, so move-assigning a real Sock into it is a bug.
FuzzedSock& FuzzedSock::operator=(Sock&& other)
{
    assert(false && "Move of Sock into FuzzedSock not allowed.");
    return *this;
}
143  
144  ssize_t FuzzedSock::Send(const void* data, size_t len, int flags) const
145  {
146      constexpr std::array send_errnos{
147          EACCES,
148          EAGAIN,
149          EALREADY,
150          EBADF,
151          ECONNRESET,
152          EDESTADDRREQ,
153          EFAULT,
154          EINTR,
155          EINVAL,
156          EISCONN,
157          EMSGSIZE,
158          ENOBUFS,
159          ENOMEM,
160          ENOTCONN,
161          ENOTSOCK,
162          EOPNOTSUPP,
163          EPIPE,
164          EWOULDBLOCK,
165      };
166      if (m_fuzzed_data_provider.ConsumeBool()) {
167          return len;
168      }
169      const ssize_t r = m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(-1, len);
170      if (r == -1) {
171          SetFuzzedErrNo(m_fuzzed_data_provider, send_errnos);
172      }
173      return r;
174  }
175  
// Simulate recv(2): return fuzzer-chosen bytes into `buf`, honoring MSG_PEEK
// by buffering peeked data in m_peek_data so a later Recv() sees the same
// bytes. May also return 0 (EOF-like) or -1 with a plausible errno.
ssize_t FuzzedSock::Recv(void* buf, size_t len, int flags) const
{
    // Have a permanent error at recv_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always return the first element and we want to avoid Recv()
    // returning -1 and setting errno to EAGAIN repeatedly.
    constexpr std::array recv_errnos{
        ECONNREFUSED,
        EAGAIN,
        EBADF,
        EFAULT,
        EINTR,
        EINVAL,
        ENOMEM,
        ENOTCONN,
        ENOTSOCK,
        EWOULDBLOCK,
    };
    assert(buf != nullptr || len == 0);

    // Do the latency before any of the "return" statements.
    if (m_fuzzed_data_provider.ConsumeBool() && std::getenv("FUZZED_SOCKET_FAKE_LATENCY") != nullptr) {
        std::this_thread::sleep_for(std::chrono::milliseconds{2});
    }

    // Sometimes return early with 0 (peer closed) or -1 (error).
    if (len == 0 || m_fuzzed_data_provider.ConsumeBool()) {
        const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
        if (r == -1) {
            SetFuzzedErrNo(m_fuzzed_data_provider, recv_errnos);
        }
        return r;
    }

    size_t copied_so_far{0};

    if (!m_peek_data.empty()) {
        // `MSG_PEEK` was used in the preceding `Recv()` call, copy the first bytes from `m_peek_data`.
        const size_t copy_len{std::min(len, m_peek_data.size())};
        std::memcpy(buf, m_peek_data.data(), copy_len);
        copied_so_far += copy_len;
        if ((flags & MSG_PEEK) == 0) {
            // A non-peeking read consumes the buffered bytes.
            m_peek_data.erase(m_peek_data.begin(), m_peek_data.begin() + copy_len);
        }
    }

    if (copied_so_far == len) {
        return copied_so_far;
    }

    // Top up with fresh fuzzer-generated bytes (possibly fewer than requested).
    auto new_data = ConsumeRandomLengthByteVector(m_fuzzed_data_provider, len - copied_so_far);
    if (new_data.empty()) return copied_so_far;

    std::memcpy(reinterpret_cast<uint8_t*>(buf) + copied_so_far, new_data.data(), new_data.size());
    copied_so_far += new_data.size();

    if ((flags & MSG_PEEK) != 0) {
        // Remember peeked bytes so the next Recv() returns the same data.
        m_peek_data.insert(m_peek_data.end(), new_data.begin(), new_data.end());
    }

    if (copied_so_far == len || m_fuzzed_data_provider.ConsumeBool()) {
        return copied_so_far;
    }

    // Pad to len bytes.
    std::memset(reinterpret_cast<uint8_t*>(buf) + copied_so_far, 0x0, len - copied_so_far);

    return len;
}
243  
244  int FuzzedSock::Connect(const sockaddr*, socklen_t) const
245  {
246      // Have a permanent error at connect_errnos[0] because when the fuzzed data is exhausted
247      // SetFuzzedErrNo() will always return the first element and we want to avoid Connect()
248      // returning -1 and setting errno to EAGAIN repeatedly.
249      constexpr std::array connect_errnos{
250          ECONNREFUSED,
251          EAGAIN,
252          ECONNRESET,
253          EHOSTUNREACH,
254          EINPROGRESS,
255          EINTR,
256          ENETUNREACH,
257          ETIMEDOUT,
258      };
259      if (m_fuzzed_data_provider.ConsumeBool()) {
260          SetFuzzedErrNo(m_fuzzed_data_provider, connect_errnos);
261          return -1;
262      }
263      return 0;
264  }
265  
266  int FuzzedSock::Bind(const sockaddr*, socklen_t) const
267  {
268      // Have a permanent error at bind_errnos[0] because when the fuzzed data is exhausted
269      // SetFuzzedErrNo() will always set the global errno to bind_errnos[0]. We want to
270      // avoid this method returning -1 and setting errno to a temporary error (like EAGAIN)
271      // repeatedly because proper code should retry on temporary errors, leading to an
272      // infinite loop.
273      constexpr std::array bind_errnos{
274          EACCES,
275          EADDRINUSE,
276          EADDRNOTAVAIL,
277          EAGAIN,
278      };
279      if (m_fuzzed_data_provider.ConsumeBool()) {
280          SetFuzzedErrNo(m_fuzzed_data_provider, bind_errnos);
281          return -1;
282      }
283      return 0;
284  }
285  
286  int FuzzedSock::Listen(int) const
287  {
288      // Have a permanent error at listen_errnos[0] because when the fuzzed data is exhausted
289      // SetFuzzedErrNo() will always set the global errno to listen_errnos[0]. We want to
290      // avoid this method returning -1 and setting errno to a temporary error (like EAGAIN)
291      // repeatedly because proper code should retry on temporary errors, leading to an
292      // infinite loop.
293      constexpr std::array listen_errnos{
294          EADDRINUSE,
295          EINVAL,
296          EOPNOTSUPP,
297      };
298      if (m_fuzzed_data_provider.ConsumeBool()) {
299          SetFuzzedErrNo(m_fuzzed_data_provider, listen_errnos);
300          return -1;
301      }
302      return 0;
303  }
304  
// Simulate accept(2): either fail with a plausible errno (returning nullptr)
// or return a fresh FuzzedSock, optionally filling `addr` with a fuzzed
// IPv4 or IPv6 peer address.
std::unique_ptr<Sock> FuzzedSock::Accept(sockaddr* addr, socklen_t* addr_len) const
{
    constexpr std::array accept_errnos{
        ECONNABORTED,
        EINTR,
        ENOMEM,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, accept_errnos);
        return std::unique_ptr<FuzzedSock>();
    }
    if (addr != nullptr) {
        // Set a fuzzed address in the output argument addr.
        // NOTE(review): assumes addr_len is non-null whenever addr is, matching
        // the accept(2) contract — callers passing addr without addr_len would crash here.
        memset(addr, 0x00, *addr_len);
        if (m_fuzzed_data_provider.ConsumeBool()) {
            // IPv4
            const socklen_t write_len = static_cast<socklen_t>(sizeof(sockaddr_in));
            // Only write if the caller's buffer is large enough; otherwise leave it zeroed.
            if (*addr_len >= write_len) {
                *addr_len = write_len;
                auto addr4 = reinterpret_cast<sockaddr_in*>(addr);
                addr4->sin_family = AF_INET;
                const auto sin_addr_bytes{m_fuzzed_data_provider.ConsumeBytes<std::byte>(sizeof(addr4->sin_addr))};
                std::ranges::copy(sin_addr_bytes, reinterpret_cast<std::byte*>(&addr4->sin_addr));
                addr4->sin_port = m_fuzzed_data_provider.ConsumeIntegralInRange<uint16_t>(1, 65535);
            }
        } else {
            // IPv6
            const socklen_t write_len = static_cast<socklen_t>(sizeof(sockaddr_in6));
            if (*addr_len >= write_len) {
                *addr_len = write_len;
                auto addr6 = reinterpret_cast<sockaddr_in6*>(addr);
                addr6->sin6_family = AF_INET6;
                const auto sin_addr_bytes{m_fuzzed_data_provider.ConsumeBytes<std::byte>(sizeof(addr6->sin6_addr))};
                std::ranges::copy(sin_addr_bytes, reinterpret_cast<std::byte*>(&addr6->sin6_addr));
                addr6->sin6_port = m_fuzzed_data_provider.ConsumeIntegralInRange<uint16_t>(1, 65535);
            }
        }
    }
    return std::make_unique<FuzzedSock>(m_fuzzed_data_provider);
}
345  
346  int FuzzedSock::GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const
347  {
348      constexpr std::array getsockopt_errnos{
349          ENOMEM,
350          ENOBUFS,
351      };
352      if (m_fuzzed_data_provider.ConsumeBool()) {
353          SetFuzzedErrNo(m_fuzzed_data_provider, getsockopt_errnos);
354          return -1;
355      }
356      if (opt_val == nullptr) {
357          return 0;
358      }
359      std::memcpy(opt_val,
360                  ConsumeFixedLengthByteVector(m_fuzzed_data_provider, *opt_len).data(),
361                  *opt_len);
362      return 0;
363  }
364  
365  int FuzzedSock::SetSockOpt(int, int, const void*, socklen_t) const
366  {
367      constexpr std::array setsockopt_errnos{
368          ENOMEM,
369          ENOBUFS,
370      };
371      if (m_fuzzed_data_provider.ConsumeBool()) {
372          SetFuzzedErrNo(m_fuzzed_data_provider, setsockopt_errnos);
373          return -1;
374      }
375      return 0;
376  }
377  
378  int FuzzedSock::GetSockName(sockaddr* name, socklen_t* name_len) const
379  {
380      constexpr std::array getsockname_errnos{
381          ECONNRESET,
382          ENOBUFS,
383      };
384      if (m_fuzzed_data_provider.ConsumeBool()) {
385          SetFuzzedErrNo(m_fuzzed_data_provider, getsockname_errnos);
386          return -1;
387      }
388      assert(name_len);
389      const auto bytes{ConsumeRandomLengthByteVector(m_fuzzed_data_provider, *name_len)};
390      if (bytes.size() < (int)sizeof(sockaddr)) return -1;
391      std::memcpy(name, bytes.data(), bytes.size());
392      *name_len = bytes.size();
393      return 0;
394  }
395  
396  bool FuzzedSock::SetNonBlocking() const
397  {
398      constexpr std::array setnonblocking_errnos{
399          EBADF,
400          EPERM,
401      };
402      if (m_fuzzed_data_provider.ConsumeBool()) {
403          SetFuzzedErrNo(m_fuzzed_data_provider, setnonblocking_errnos);
404          return false;
405      }
406      return true;
407  }
408  
// Report the selectability decided once at construction time (a fuzzed
// bool), so a given FuzzedSock is consistently selectable or not.
bool FuzzedSock::IsSelectable() const
{
    return m_selectable;
}
413  
414  bool FuzzedSock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
415  {
416      constexpr std::array wait_errnos{
417          EBADF,
418          EINTR,
419          EINVAL,
420      };
421      if (m_fuzzed_data_provider.ConsumeBool()) {
422          SetFuzzedErrNo(m_fuzzed_data_provider, wait_errnos);
423          return false;
424      }
425      if (occurred != nullptr) {
426          // We simulate the requested event as occurred when ConsumeBool()
427          // returns false. This avoids simulating endless waiting if the
428          // FuzzedDataProvider runs out of data.
429          *occurred = m_fuzzed_data_provider.ConsumeBool() ? 0 : requested;
430      }
431      ElapseTime(timeout);
432      return true;
433  }
434  
435  bool FuzzedSock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const
436  {
437      for (auto& [sock, events] : events_per_sock) {
438          (void)sock;
439          // We simulate the requested event as occurred when ConsumeBool()
440          // returns false. This avoids simulating endless waiting if the
441          // FuzzedDataProvider runs out of data.
442          events.occurred = m_fuzzed_data_provider.ConsumeBool() ? 0 : events.requested;
443      }
444      ElapseTime(timeout);
445      return true;
446  }
447  
448  bool FuzzedSock::IsConnected(std::string& errmsg) const
449  {
450      if (m_fuzzed_data_provider.ConsumeBool()) {
451          return true;
452      }
453      errmsg = "disconnected at random by the fuzzer";
454      return false;
455  }
456  
457  void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, CNode& node) noexcept
458  {
459      auto successfully_connected = fuzzed_data_provider.ConsumeBool();
460      auto remote_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
461      auto local_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
462      auto version = fuzzed_data_provider.ConsumeIntegralInRange<int32_t>(MIN_PEER_PROTO_VERSION, std::numeric_limits<int32_t>::max());
463      auto relay_txs = fuzzed_data_provider.ConsumeBool();
464      connman.Handshake(node, successfully_connected, remote_services, local_services, version, relay_txs);
465  }