// src/random.cpp
  1  // Copyright (c) 2009-2010 Satoshi Nakamoto
  2  // Copyright (c) 2009-2022 The Bitcoin Core developers
  3  // Distributed under the MIT software license, see the accompanying
  4  // file COPYING or http://www.opensource.org/licenses/mit-license.php.
  5  
  6  #if defined(HAVE_CONFIG_H)
  7  #include <config/bitcoin-config.h>
  8  #endif
  9  
 10  #include <random.h>
 11  
 12  #include <compat/compat.h>
 13  #include <compat/cpuid.h>
 14  #include <crypto/chacha20.h>
 15  #include <crypto/sha256.h>
 16  #include <crypto/sha512.h>
 17  #include <logging.h>
 18  #include <randomenv.h>
 19  #include <span.h>
 20  #include <support/allocators/secure.h>
 21  #include <support/cleanse.h>
 22  #include <sync.h>
 23  #include <util/time.h>
 24  
 25  #include <array>
 26  #include <cmath>
 27  #include <cstdlib>
 28  #include <thread>
 29  
 30  #ifdef WIN32
 31  #include <windows.h>
 32  #include <wincrypt.h>
 33  #else
 34  #include <fcntl.h>
 35  #include <sys/time.h>
 36  #endif
 37  
 38  #if defined(HAVE_GETRANDOM) || (defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX))
 39  #include <sys/random.h>
 40  #endif
 41  
 42  #ifdef HAVE_SYSCTL_ARND
 43  #include <sys/sysctl.h>
 44  #endif
 45  #if defined(HAVE_STRONG_GETAUXVAL) && defined(__aarch64__)
 46  #include <sys/auxv.h>
 47  #endif
 48  
/** Terminate the process when no randomness can be obtained.
 *
 * Proceeding without entropy could make generated keys predictable, so the
 * only safe response is to log and abort immediately.
 */
[[noreturn]] static void RandFailure()
{
    LogPrintf("Failed to read randomness, aborting\n");
    std::abort();
}
 54  
/** Return a high-resolution, monotonically-suggestive counter used as a cheap
 * entropy/benchmark source. Not wall-clock time; only differences matter. */
static inline int64_t GetPerformanceCounter() noexcept
{
    // Read the hardware time stamp counter when available.
    // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information.
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
    return __rdtsc();
#elif !defined(_MSC_VER) && defined(__i386__)
    uint64_t r = 0;
    __asm__ volatile ("rdtsc" : "=A"(r)); // Constrain the r variable to the eax:edx pair.
    return r;
#elif !defined(_MSC_VER) && (defined(__x86_64__) || defined(__amd64__))
    uint64_t r1 = 0, r2 = 0;
    __asm__ volatile ("rdtsc" : "=a"(r1), "=d"(r2)); // Constrain r1 to rax and r2 to rdx.
    return (r2 << 32) | r1;
#else
    // Fall back to using standard library clock (usually microsecond or nanosecond precision)
    return std::chrono::high_resolution_clock::now().time_since_epoch().count();
#endif
}
 74  
#ifdef HAVE_GETCPUID
// Set once by InitHardwareRand(); read by the Seed* functions below.
static bool g_rdrand_supported = false;
static bool g_rdseed_supported = false;
// CPUID leaf 1 ECX bit 30 advertises RDRAND; leaf 7 EBX bit 18 advertises RDSEED.
static constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000;
static constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000;
#ifdef bit_RDRND
static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND");
#endif
#ifdef bit_RDSEED
static_assert(CPUID_F7_EBX_RDSEED == bit_RDSEED, "Unexpected value for bit_RDSEED");
#endif
 86  
/** Probe CPUID for RDRAND/RDSEED support and record it in the globals above. */
static void InitHardwareRand()
{
    uint32_t eax, ebx, ecx, edx;
    // Leaf 1: basic feature flags (RDRAND bit lives in ECX).
    GetCPUID(1, 0, eax, ebx, ecx, edx);
    if (ecx & CPUID_F1_ECX_RDRAND) {
        g_rdrand_supported = true;
    }
    // Leaf 7, subleaf 0: extended feature flags (RDSEED bit lives in EBX).
    GetCPUID(7, 0, eax, ebx, ecx, edx);
    if (ebx & CPUID_F7_EBX_RDSEED) {
        g_rdseed_supported = true;
    }
}
 99  
/** Log which hardware entropy sources were detected by InitHardwareRand(). */
static void ReportHardwareRand()
{
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
    // from global constructors, before logging is initialized.
    if (g_rdseed_supported) {
        LogPrintf("Using RdSeed as an additional entropy source\n");
    }
    if (g_rdrand_supported) {
        LogPrintf("Using RdRand as an additional entropy source\n");
    }
}
111  
/** Read 64 bits of entropy using rdrand.
 *
 * Must only be called when RdRand is supported.
 *
 * The instruction is emitted as raw opcode bytes (annotated per line),
 * presumably so it assembles even without rdrand mnemonic support — TODO confirm.
 */
static uint64_t GetRdRand() noexcept
{
    // RdRand may very rarely fail. Invoke it up to 10 times in a loop to reduce this risk.
#ifdef __i386__
    uint8_t ok;
    // Initialize to 0 to silence a compiler warning that r1 or r2 may be used
    // uninitialized. Even if rdrand fails (!ok) it will set the output to 0,
    // but there is no way that the compiler could know that.
    uint32_t r1 = 0, r2 = 0;
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %eax
        if (ok) break;
    }
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdrand %eax
        if (ok) break;
    }
    // On 32-bit x86 two 32-bit draws are combined into one 64-bit result.
    return (((uint64_t)r2) << 32) | r1;
#elif defined(__x86_64__) || defined(__amd64__)
    uint8_t ok;
    uint64_t r1 = 0; // See above why we initialize to 0.
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %rax
        if (ok) break;
    }
    return r1;
#else
#error "RdRand is only supported on x86 and x86_64"
#endif
}
146  
/** Read 64 bits of entropy using rdseed.
 *
 * Must only be called when RdSeed is supported.
 *
 * Unlike GetRdRand(), this retries forever (with a pause hint between
 * attempts) rather than giving up after a bounded number of tries.
 */
static uint64_t GetRdSeed() noexcept
{
    // RdSeed may fail when the HW RNG is overloaded. Loop indefinitely until enough entropy is gathered,
    // but pause after every failure.
#ifdef __i386__
    uint8_t ok;
    uint32_t r1, r2;
    do {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %eax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    do {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdseed %eax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    // Combine the two 32-bit draws into one 64-bit result.
    return (((uint64_t)r2) << 32) | r1;
#elif defined(__x86_64__) || defined(__amd64__)
    uint8_t ok;
    uint64_t r1;
    do {
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %rax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    return r1;
#else
#error "RdSeed is only supported on x86 and x86_64"
#endif
}
182  
#elif defined(__aarch64__) && defined(HWCAP2_RNG)

// Set once by InitHardwareRand(); read by the Seed* functions below.
static bool g_rndr_supported = false;

/** Detect the ARM RNDR/RNDRRS random-number feature via the HWCAP2_RNG auxval bit. */
static void InitHardwareRand()
{
    if (getauxval(AT_HWCAP2) & HWCAP2_RNG) {
        g_rndr_supported = true;
    }
}
193  
/** Log whether the RNDR/RNDRRS entropy sources were detected. */
static void ReportHardwareRand()
{
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
    // from global constructors, before logging is initialized.
    if (g_rndr_supported) {
        LogPrintf("Using RNDR and RNDRRS as additional entropy sources\n");
    }
}
202  
/** Read 64 bits of entropy using rndr.
 *
 * Must only be called when RNDR is supported.
 *
 * Retries forever (with a yield hint between attempts) until the register
 * read reports success via the condition flags.
 */
static uint64_t GetRNDR() noexcept
{
    uint8_t ok;
    uint64_t r1;
    do {
        // https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers/RNDR--Random-Number
        // s3_3_c2_c4_0 is the encoded name of the RNDR system register;
        // cset captures the NZCV success flag into `ok`.
        __asm__ volatile("mrs %0, s3_3_c2_c4_0; cset %w1, ne;"
                         : "=r"(r1), "=r"(ok)::"cc");
        if (ok) break;
        __asm__ volatile("yield");
    } while (true);
    return r1;
}
220  
/** Read 64 bits of entropy using rndrrs.
 *
 * Must only be called when RNDRRS is supported.
 *
 * RNDRRS is the reseeded variant of RNDR (see the linked ARM reference);
 * like GetRNDR() this retries forever until the read reports success.
 */
static uint64_t GetRNDRRS() noexcept
{
    uint8_t ok;
    uint64_t r1;
    do {
        // https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers/RNDRRS--Reseeded-Random-Number
        // s3_3_c2_c4_1 is the encoded name of the RNDRRS system register.
        __asm__ volatile("mrs %0, s3_3_c2_c4_1; cset %w1, ne;"
                         : "=r"(r1), "=r"(ok)::"cc");
        if (ok) break;
        __asm__ volatile("yield");
    } while (true);
    return r1;
}
238  
#else
/* Access to other hardware random number generators could be added here later,
 * assuming it is sufficiently fast (in the order of a few hundred CPU cycles).
 * Slower sources should probably be invoked separately, and/or only from
 * RandAddPeriodic (which is called once a minute).
 */
// No hardware RNG support detected for this platform: both hooks are no-ops.
static void InitHardwareRand() {}
static void ReportHardwareRand() {}
#endif
248  
/** Add 64 bits of entropy gathered from hardware to hasher. Do nothing if not supported.
 *
 * Single draw per call: this is the cheap path used by SeedFast().
 */
static void SeedHardwareFast(CSHA512& hasher) noexcept {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
    if (g_rdrand_supported) {
        uint64_t out = GetRdRand();
        hasher.Write((const unsigned char*)&out, sizeof(out));
        return;
    }
#elif defined(__aarch64__) && defined(HWCAP2_RNG)
    if (g_rndr_supported) {
        uint64_t out = GetRNDR();
        hasher.Write((const unsigned char*)&out, sizeof(out));
        return;
    }
#endif
}
265  
/** Add 256 bits of entropy gathered from hardware to hasher. Do nothing if not supported.
 *
 * Four 64-bit draws; used once at startup by SeedStartup().
 */
static void SeedHardwareSlow(CSHA512& hasher) noexcept {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
    // When we want 256 bits of entropy, prefer RdSeed over RdRand, as it's
    // guaranteed to produce independent randomness on every call.
    if (g_rdseed_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = GetRdSeed();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
    // When falling back to RdRand, XOR the result of 1024 results.
    // This guarantees a reseeding occurs between each.
    if (g_rdrand_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = 0;
            for (int j = 0; j < 1024; ++j) out ^= GetRdRand();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
#elif defined(__aarch64__) && defined(HWCAP2_RNG)
    // On ARM the reseeded RNDRRS variant is used directly.
    if (g_rndr_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = GetRNDRRS();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
#endif
}
298  
/** Use repeated SHA512 to strengthen the randomness in seed, and feed into hasher.
 *
 * @param seed   32 bytes of input entropy to be strengthened.
 * @param dur    Minimum wall-clock duration to spend hashing.
 * @param hasher Outer hasher that receives the strengthened output plus
 *               timing side-products.
 */
static void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration dur, CSHA512& hasher) noexcept
{
    CSHA512 inner_hasher;
    inner_hasher.Write(seed, sizeof(seed));

    // Hash loop: repeatedly re-hash the inner state in batches of 1000,
    // checking the clock only between batches.
    unsigned char buffer[64];
    const auto stop{SteadyClock::now() + dur};
    do {
        for (int i = 0; i < 1000; ++i) {
            inner_hasher.Finalize(buffer);
            inner_hasher.Reset();
            inner_hasher.Write(buffer, sizeof(buffer));
        }
        // Benchmark operation and feed it into outer hasher.
        int64_t perf = GetPerformanceCounter();
        hasher.Write((const unsigned char*)&perf, sizeof(perf));
    } while (SteadyClock::now() < stop);

    // Produce output from inner state and feed it to outer hasher.
    inner_hasher.Finalize(buffer);
    hasher.Write(buffer, sizeof(buffer));
    // Try to clean up.
    inner_hasher.Reset();
    memory_cleanse(buffer, sizeof(buffer));
}
326  
#ifndef WIN32
/** Fallback: get 32 bytes of system entropy from /dev/urandom. The most
 * compatible way to get cryptographic randomness on UNIX-ish platforms.
 *
 * Aborts via RandFailure() on any open/read error; note short reads are
 * handled by looping, but a read error (including EINTR) is treated as fatal.
 */
[[maybe_unused]] static void GetDevURandom(unsigned char *ent32)
{
    int f = open("/dev/urandom", O_RDONLY);
    if (f == -1) {
        RandFailure();
    }
    int have = 0;
    do {
        ssize_t n = read(f, ent32 + have, NUM_OS_RANDOM_BYTES - have);
        // Fail on read error, EOF, or an impossible over-read.
        if (n <= 0 || n + have > NUM_OS_RANDOM_BYTES) {
            close(f);
            RandFailure();
        }
        have += n;
    } while (have < NUM_OS_RANDOM_BYTES);
    close(f);
}
#endif
349  
/** Get 32 bytes of system entropy.
 *
 * Writes exactly NUM_OS_RANDOM_BYTES into ent32, selecting the best
 * platform-specific API at compile time; aborts via RandFailure() on error.
 */
void GetOSRand(unsigned char *ent32)
{
#if defined(WIN32)
    HCRYPTPROV hProvider;
    int ret = CryptAcquireContextW(&hProvider, nullptr, nullptr, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT);
    if (!ret) {
        RandFailure();
    }
    ret = CryptGenRandom(hProvider, NUM_OS_RANDOM_BYTES, ent32);
    if (!ret) {
        RandFailure();
    }
    CryptReleaseContext(hProvider, 0);
#elif defined(HAVE_GETRANDOM)
    /* Linux. From the getrandom(2) man page:
     * "If the urandom source has been initialized, reads of up to 256 bytes
     * will always return as many bytes as requested and will not be
     * interrupted by signals."
     */
    if (getrandom(ent32, NUM_OS_RANDOM_BYTES, 0) != NUM_OS_RANDOM_BYTES) {
        RandFailure();
    }
#elif defined(__OpenBSD__)
    /* OpenBSD. From the arc4random(3) man page:
       "Use of these functions is encouraged for almost all random number
        consumption because the other interfaces are deficient in either
        quality, portability, standardization, or availability."
       The function call is always successful.
     */
    arc4random_buf(ent32, NUM_OS_RANDOM_BYTES);
#elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
    // macOS getentropy(2).
    if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
        RandFailure();
    }
#elif defined(HAVE_SYSCTL_ARND)
    /* FreeBSD, NetBSD and similar. It is possible for the call to return less
     * bytes than requested, so need to read in a loop.
     */
    static int name[2] = {CTL_KERN, KERN_ARND};
    int have = 0;
    do {
        size_t len = NUM_OS_RANDOM_BYTES - have;
        if (sysctl(name, std::size(name), ent32 + have, &len, nullptr, 0) != 0) {
            RandFailure();
        }
        have += len;
    } while (have < NUM_OS_RANDOM_BYTES);
#else
    /* Fall back to /dev/urandom if there is no specific method implemented to
     * get system entropy for this OS.
     */
    GetDevURandom(ent32);
#endif
}
405  
namespace {

/** Process-wide RNG state: 256 bits of chained entropy plus an event hasher.
 *
 * Thread-safe; all mutable state is guarded by one of the two mutexes below.
 */
class RNGState {
    Mutex m_mutex;
    /* The RNG state consists of 256 bits of entropy, taken from the output of
     * one operation's SHA512 output, and fed as input to the next one.
     * Carrying 256 bits of entropy should be sufficient to guarantee
     * unpredictability as long as any entropy source was ever unpredictable
     * to an attacker. To protect against situations where an attacker might
     * observe the RNG's state, fresh entropy is always mixed when
     * GetStrongRandBytes is called.
     */
    unsigned char m_state[32] GUARDED_BY(m_mutex) = {0};
    // Monotonic counter mixed into every extraction so no two hashes repeat.
    uint64_t m_counter GUARDED_BY(m_mutex) = 0;
    // Becomes true once MixExtract() has been called with strong_seed=true.
    bool m_strongly_seeded GUARDED_BY(m_mutex) = false;

    // Event entropy uses its own mutex so AddEvent() (triggerable by peers)
    // does not contend with extraction under m_mutex.
    Mutex m_events_mutex;
    CSHA256 m_events_hasher GUARDED_BY(m_events_mutex);

public:
    RNGState() noexcept
    {
        InitHardwareRand();
    }

    ~RNGState() = default;

    /** Mix a 32-bit event id plus the low 32 bits of the performance counter
     * into the events hasher. */
    void AddEvent(uint32_t event_info) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
    {
        LOCK(m_events_mutex);

        m_events_hasher.Write((const unsigned char *)&event_info, sizeof(event_info));
        // Get the low four bytes of the performance counter. This translates to roughly the
        // subsecond part.
        uint32_t perfcounter = (GetPerformanceCounter() & 0xffffffff);
        m_events_hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
    }

    /**
     * Feed (the hash of) all events added through AddEvent() to hasher.
     */
    void SeedEvents(CSHA512& hasher) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
    {
        // We use only SHA256 for the events hashing to get the ASM speedups we have for SHA256,
        // since we want it to be fast as network peers may be able to trigger it repeatedly.
        LOCK(m_events_mutex);

        unsigned char events_hash[32];
        m_events_hasher.Finalize(events_hash);
        hasher.Write(events_hash, 32);

        // Re-initialize the hasher with the finalized state to use later.
        m_events_hasher.Reset();
        m_events_hasher.Write(events_hash, 32);
    }

    /** Extract up to 32 bytes of entropy from the RNG state, mixing in new entropy from hasher.
     *
     * If this function has never been called with strong_seed = true, false is returned.
     */
    bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        assert(num <= 32);
        unsigned char buf[64];
        static_assert(sizeof(buf) == CSHA512::OUTPUT_SIZE, "Buffer needs to have hasher's output size");
        bool ret;
        {
            LOCK(m_mutex);
            ret = (m_strongly_seeded |= strong_seed);
            // Write the current state of the RNG into the hasher
            hasher.Write(m_state, 32);
            // Write a new counter number into the state
            hasher.Write((const unsigned char*)&m_counter, sizeof(m_counter));
            ++m_counter;
            // Finalize the hasher
            hasher.Finalize(buf);
            // Store the last 32 bytes of the hash output as new RNG state.
            memcpy(m_state, buf + 32, 32);
        }
        // If desired, copy (up to) the first 32 bytes of the hash output as output.
        // Output and new state come from disjoint halves of the SHA512 digest,
        // so revealing the output does not reveal the state.
        if (num) {
            assert(out != nullptr);
            memcpy(out, buf, num);
        }
        // Best effort cleanup of internal state
        hasher.Reset();
        memory_cleanse(buf, 64);
        return ret;
    }
};

/** Return the singleton RNGState, constructing it on first use. */
RNGState& GetRNGState() noexcept
{
    // This idiom relies on the guarantee that static variable are initialized
    // on first call, even when multiple parallel calls are permitted.
    // The vector gives the state secure_allocator handling
    // (see support/allocators/secure.h).
    static std::vector<RNGState, secure_allocator<RNGState>> g_rng(1);
    return g_rng[0];
}
}
505  
506  /* A note on the use of noexcept in the seeding functions below:
507   *
508   * None of the RNG code should ever throw any exception.
509   */
510  
511  static void SeedTimestamp(CSHA512& hasher) noexcept
512  {
513      int64_t perfcounter = GetPerformanceCounter();
514      hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
515  }
516  
/** Gather entropy cheap enough to collect on every RNG invocation. */
static void SeedFast(CSHA512& hasher) noexcept
{
    unsigned char buffer[32];

    // Stack pointer to indirectly commit to thread/callstack.
    // Only the buffer's address is hashed, never its (uninitialized) contents.
    const unsigned char* ptr = buffer;
    hasher.Write((const unsigned char*)&ptr, sizeof(ptr));

    // Hardware randomness is very fast when available; use it always.
    SeedHardwareFast(hasher);

    // High-precision timestamp
    SeedTimestamp(hasher);
}
531  
/** Gather stronger entropy: everything SeedFast() collects, plus 32 bytes of
 * OS randomness and the accumulated event hash. */
static void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept
{
    unsigned char buffer[32];

    // Everything that the 'fast' seeder includes
    SeedFast(hasher);

    // OS randomness
    GetOSRand(buffer);
    hasher.Write(buffer, sizeof(buffer));

    // Add the events hasher into the mix
    rng.SeedEvents(hasher);

    // High-precision timestamp.
    //
    // Note that we also commit to a timestamp in the Fast seeder, so we indirectly commit to a
    // benchmark of all the entropy gathering sources in this function).
    SeedTimestamp(hasher);
}
552  
/** Extract entropy from rng, strengthen it, and feed it into hasher.
 *
 * A copy of hasher's current entropy is included (via CSHA512(hasher)) so the
 * strengthening covers everything gathered so far, not just the rng state.
 */
static void SeedStrengthen(CSHA512& hasher, RNGState& rng, SteadyClock::duration dur) noexcept
{
    // Generate 32 bytes of entropy from the RNG, and a copy of the entropy already in hasher.
    unsigned char strengthen_seed[32];
    rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false);
    // Strengthen the seed, and feed it into hasher.
    Strengthen(strengthen_seed, dur, hasher);
}
562  
/** Entropy gathering performed by RandAddPeriodic(): fast sources, events,
 * dynamic environment data, and 10ms of strengthening. */
static void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept
{
    // Everything that the 'fast' seeder includes
    SeedFast(hasher);

    // High-precision timestamp
    SeedTimestamp(hasher);

    // Add the events hasher into the mix
    rng.SeedEvents(hasher);

    // Dynamic environment data (performance monitoring, ...)
    auto old_size = hasher.Size();
    RandAddDynamicEnv(hasher);
    LogPrint(BCLog::RAND, "Feeding %i bytes of dynamic environment data into RNG\n", hasher.Size() - old_size);

    // Strengthen for 10 ms
    SeedStrengthen(hasher, rng, 10ms);
}
582  
/** One-time startup seeding: hardware, slow sources, full static and dynamic
 * environment data, and 100ms of strengthening. */
static void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept
{
    // Gather 256 bits of hardware randomness, if available
    SeedHardwareSlow(hasher);

    // Everything that the 'slow' seeder includes.
    SeedSlow(hasher, rng);

    // Dynamic environment data (performance monitoring, ...)
    auto old_size = hasher.Size();
    RandAddDynamicEnv(hasher);

    // Static environment data
    RandAddStaticEnv(hasher);
    LogPrint(BCLog::RAND, "Feeding %i bytes of environment data into RNG\n", hasher.Size() - old_size);

    // Strengthen for 100 ms
    SeedStrengthen(hasher, rng, 100ms);
}
602  
/** Seeding depth requested from ProcRand(). */
enum class RNGLevel {
    FAST, //!< Automatically called by GetRandBytes
    SLOW, //!< Automatically called by GetStrongRandBytes
    PERIODIC, //!< Called by RandAddPeriodic()
};
608  
/** Seed at the requested level and extract num (<= 32) bytes into out.
 *
 * On the very first invocation in the process the RNG has not yet been
 * strongly seeded, so SeedStartup() is additionally run before extracting.
 */
static void ProcRand(unsigned char* out, int num, RNGLevel level) noexcept
{
    // Make sure the RNG is initialized first (as all Seed* function possibly need hwrand to be available).
    RNGState& rng = GetRNGState();

    assert(num <= 32);

    CSHA512 hasher;
    switch (level) {
    case RNGLevel::FAST:
        SeedFast(hasher);
        break;
    case RNGLevel::SLOW:
        SeedSlow(hasher, rng);
        break;
    case RNGLevel::PERIODIC:
        SeedPeriodic(hasher, rng);
        break;
    }

    // Combine with and update state
    // MixExtract returns false until it has once been called with strong_seed=true.
    if (!rng.MixExtract(out, num, std::move(hasher), false)) {
        // On the first invocation, also seed with SeedStartup().
        CSHA512 startup_hasher;
        SeedStartup(startup_hasher, rng);
        rng.MixExtract(out, num, std::move(startup_hasher), true);
    }
}
637  
/** Fill `bytes` using fast seeding (see RNGLevel::FAST). */
void GetRandBytes(Span<unsigned char> bytes) noexcept { ProcRand(bytes.data(), bytes.size(), RNGLevel::FAST); }
/** Fill `bytes` using slow seeding, which also mixes in OS entropy. */
void GetStrongRandBytes(Span<unsigned char> bytes) noexcept { ProcRand(bytes.data(), bytes.size(), RNGLevel::SLOW); }
/** Gather periodic entropy without producing any output. */
void RandAddPeriodic() noexcept { ProcRand(nullptr, 0, RNGLevel::PERIODIC); }
/** Mix an application-event identifier into the events hasher. */
void RandAddEvent(const uint32_t event_info) noexcept { GetRNGState().AddEvent(event_info); }

// When true, GetRandInternal() uses a deterministic (zero-keyed)
// FastRandomContext; used by tests.
bool g_mock_deterministic_tests{false};
644  
645  uint64_t GetRandInternal(uint64_t nMax) noexcept
646  {
647      return FastRandomContext(g_mock_deterministic_tests).randrange(nMax);
648  }
649  
650  uint256 GetRandHash() noexcept
651  {
652      uint256 hash;
653      GetRandBytes(hash);
654      return hash;
655  }
656  
/** (Re)key the ChaCha20 stream with 32 bytes of strong process randomness. */
void FastRandomContext::RandomSeed()
{
    uint256 seed = GetRandHash();
    rng.SetKey(MakeByteSpan(seed));
    requires_seed = false;
}
663  
/** Return 32 bytes of keystream output, seeding first if needed. */
uint256 FastRandomContext::rand256() noexcept
{
    if (requires_seed) RandomSeed();
    uint256 ret;
    rng.Keystream(MakeWritableByteSpan(ret));
    return ret;
}
671  
/** Return `len` random bytes as a vector of B (unsigned char or std::byte). */
template <typename B>
std::vector<B> FastRandomContext::randbytes(size_t len)
{
    std::vector<B> ret(len);
    fillrand(MakeWritableByteSpan(ret));
    return ret;
}
// Explicit instantiations for the two supported byte types.
template std::vector<unsigned char> FastRandomContext::randbytes(size_t);
template std::vector<std::byte> FastRandomContext::randbytes(size_t);
681  
/** Fill `output` with keystream bytes, seeding first if needed. */
void FastRandomContext::fillrand(Span<std::byte> output)
{
    if (requires_seed) RandomSeed();
    rng.Keystream(output);
}
687  
688  FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), rng(MakeByteSpan(seed)), bitbuf_size(0) {}
689  
690  bool Random_SanityCheck()
691  {
692      uint64_t start = GetPerformanceCounter();
693  
694      /* This does not measure the quality of randomness, but it does test that
695       * GetOSRand() overwrites all 32 bytes of the output given a maximum
696       * number of tries.
697       */
698      static constexpr int MAX_TRIES{1024};
699      uint8_t data[NUM_OS_RANDOM_BYTES];
700      bool overwritten[NUM_OS_RANDOM_BYTES] = {}; /* Tracks which bytes have been overwritten at least once */
701      int num_overwritten;
702      int tries = 0;
703      /* Loop until all bytes have been overwritten at least once, or max number tries reached */
704      do {
705          memset(data, 0, NUM_OS_RANDOM_BYTES);
706          GetOSRand(data);
707          for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
708              overwritten[x] |= (data[x] != 0);
709          }
710  
711          num_overwritten = 0;
712          for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
713              if (overwritten[x]) {
714                  num_overwritten += 1;
715              }
716          }
717  
718          tries += 1;
719      } while (num_overwritten < NUM_OS_RANDOM_BYTES && tries < MAX_TRIES);
720      if (num_overwritten != NUM_OS_RANDOM_BYTES) return false; /* If this failed, bailed out after too many tries */
721  
722      // Check that GetPerformanceCounter increases at least during a GetOSRand() call + 1ms sleep.
723      std::this_thread::sleep_for(std::chrono::milliseconds(1));
724      uint64_t stop = GetPerformanceCounter();
725      if (stop == start) return false;
726  
727      // We called GetPerformanceCounter. Use it as entropy.
728      CSHA512 to_add;
729      to_add.Write((const unsigned char*)&start, sizeof(start));
730      to_add.Write((const unsigned char*)&stop, sizeof(stop));
731      GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false);
732  
733      return true;
734  }
735  
// All-zero ChaCha20 key used as the placeholder key until (re)seeding.
static constexpr std::array<std::byte, ChaCha20::KEYLEN> ZERO_KEY{};

/** Construct either a securely-seeded-on-first-use context (fDeterministic ==
 * false) or a fixed zero-keyed one for deterministic tests. */
FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), rng(ZERO_KEY), bitbuf_size(0)
{
    // Note that despite always initializing with ZERO_KEY, requires_seed is set to true if not
    // fDeterministic. That means the rng will be reinitialized with a secure random key upon first
    // use.
}
744  
/** Move-assign: take over `from`'s state, then invalidate `from` so it must
 * reseed (and refill its bit buffer) before producing further output. */
FastRandomContext& FastRandomContext::operator=(FastRandomContext&& from) noexcept
{
    requires_seed = from.requires_seed;
    rng = from.rng;
    bitbuf = from.bitbuf;
    bitbuf_size = from.bitbuf_size;
    // Force the moved-from object to reseed before any further use.
    from.requires_seed = true;
    from.bitbuf_size = 0;
    return *this;
}
755  
/** Force RNG initialization (including the one-time startup seeding) and log
 * which hardware entropy sources are in use. */
void RandomInit()
{
    // Invoke RNG code to trigger initialization (if not already performed)
    ProcRand(nullptr, 0, RNGLevel::FAST);

    ReportHardwareRand();
}
763  
/** Return `now` plus a random delay drawn from an exponential distribution
 * with mean `average_interval`.
 *
 * Uses inverse-CDF sampling: 48 random bits scaled by -1/2^48 give -u with u
 * uniform in [0,1), and -log1p(-u) = -log(1-u) is Exponential(1).
 */
std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval)
{
    double unscaled = -std::log1p(GetRand(uint64_t{1} << 48) * -0.0000000000000035527136788 /* -1/2^48 */);
    // The + 0.5us rounds the scaled duration to the nearest microsecond.
    return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us);
}