// src/test/txrequest_tests.cpp
// Copyright (c) 2020-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <txrequest.h>
#include <uint256.h>

#include <test/util/random.h>
#include <test/util/setup_common.h>

#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include <boost/test/unit_test.hpp>
 17  
 18  BOOST_FIXTURE_TEST_SUITE(txrequest_tests, BasicTestingSetup)
 19  
 20  namespace {
 21  
 22  constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min();
 23  constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max();
 24  constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1};
 25  constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0};
 26  
 27  /** An Action is a function to call at a particular (simulated) timestamp. */
 28  using Action = std::pair<std::chrono::microseconds, std::function<void()>>;
 29  
 30  /** Object that stores actions from multiple interleaved scenarios, and data shared across them.
 31   *
 32   * The Scenario below is used to fill this.
 33   */
 34  struct Runner
 35  {
 36      /** The TxRequestTracker being tested. */
 37      TxRequestTracker txrequest;
 38  
 39      /** List of actions to be executed (in order of increasing timestamp). */
 40      std::vector<Action> actions;
 41  
 42      /** Which node ids have been assigned already (to prevent reuse). */
 43      std::set<NodeId> peerset;
 44  
 45      /** Which txhashes have been assigned already (to prevent reuse). */
 46      std::set<uint256> txhashset;
 47  
 48      /** Which (peer, gtxid) combinations are known to be expired. These need to be accumulated here instead of
 49       *  checked directly in the GetRequestable return value to avoid introducing a dependency between the various
 50       *  parallel tests. */
 51      std::multiset<std::pair<NodeId, GenTxid>> expired;
 52  };
 53  
 54  std::chrono::microseconds RandomTime8s() { return std::chrono::microseconds{1 + InsecureRandBits(23)}; }
 55  std::chrono::microseconds RandomTime1y() { return std::chrono::microseconds{1 + InsecureRandBits(45)}; }
 56  
 57  /** A proxy for a Runner that helps build a sequence of consecutive test actions on a TxRequestTracker.
 58   *
 59   * Each Scenario is a proxy through which actions for the (sequential) execution of various tests are added to a
 60   * Runner. The actions from multiple scenarios are then run concurrently, resulting in these tests being performed
 61   * against a TxRequestTracker in parallel. Every test has its own unique txhashes and NodeIds which are not
 62   * reused in other tests, and thus they should be independent from each other. Running them in parallel however
 63   * means that we verify the behavior (w.r.t. one test's txhashes and NodeIds) even when the state of the data
 64   * structure is more complicated due to the presence of other tests.
 65   */
 66  class Scenario
 67  {
 68      Runner& m_runner;
 69      std::chrono::microseconds m_now;
 70      std::string m_testname;
 71  
 72  public:
 73      Scenario(Runner& runner, std::chrono::microseconds starttime) : m_runner(runner), m_now(starttime) {}
 74  
 75      /** Set a name for the current test, to give more clear error messages. */
 76      void SetTestName(std::string testname)
 77      {
 78          m_testname = std::move(testname);
 79      }
 80  
 81      /** Advance this Scenario's time; this affects the timestamps newly scheduled events get. */
 82      void AdvanceTime(std::chrono::microseconds amount)
 83      {
 84          assert(amount.count() >= 0);
 85          m_now += amount;
 86      }
 87  
 88      /** Schedule a ForgetTxHash call at the Scheduler's current time. */
 89      void ForgetTxHash(const uint256& txhash)
 90      {
 91          auto& runner = m_runner;
 92          runner.actions.emplace_back(m_now, [=,&runner]() {
 93              runner.txrequest.ForgetTxHash(txhash);
 94              runner.txrequest.SanityCheck();
 95          });
 96      }
 97  
 98      /** Schedule a ReceivedInv call at the Scheduler's current time. */
 99      void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool pref, std::chrono::microseconds reqtime)
100      {
101          auto& runner = m_runner;
102          runner.actions.emplace_back(m_now, [=,&runner]() {
103              runner.txrequest.ReceivedInv(peer, gtxid, pref, reqtime);
104              runner.txrequest.SanityCheck();
105          });
106      }
107  
108      /** Schedule a DisconnectedPeer call at the Scheduler's current time. */
109      void DisconnectedPeer(NodeId peer)
110      {
111          auto& runner = m_runner;
112          runner.actions.emplace_back(m_now, [=,&runner]() {
113              runner.txrequest.DisconnectedPeer(peer);
114              runner.txrequest.SanityCheck();
115          });
116      }
117  
118      /** Schedule a RequestedTx call at the Scheduler's current time. */
119      void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds exptime)
120      {
121          auto& runner = m_runner;
122          runner.actions.emplace_back(m_now, [=,&runner]() {
123              runner.txrequest.RequestedTx(peer, txhash, exptime);
124              runner.txrequest.SanityCheck();
125          });
126      }
127  
128      /** Schedule a ReceivedResponse call at the Scheduler's current time. */
129      void ReceivedResponse(NodeId peer, const uint256& txhash)
130      {
131          auto& runner = m_runner;
132          runner.actions.emplace_back(m_now, [=,&runner]() {
133              runner.txrequest.ReceivedResponse(peer, txhash);
134              runner.txrequest.SanityCheck();
135          });
136      }
137  
138      /** Schedule calls to verify the TxRequestTracker's state at the Scheduler's current time.
139       *
140       * @param peer       The peer whose state will be inspected.
141       * @param expected   The expected return value for GetRequestable(peer)
142       * @param candidates The expected return value CountCandidates(peer)
143       * @param inflight   The expected return value CountInFlight(peer)
144       * @param completed  The expected return value of Count(peer), minus candidates and inflight.
145       * @param checkname  An arbitrary string to include in error messages, for test identificatrion.
146       * @param offset     Offset with the current time to use (must be <= 0). This allows simulations of time going
147       *                   backwards (but note that the ordering of this event only follows the scenario's m_now.
148       */
149      void Check(NodeId peer, const std::vector<GenTxid>& expected, size_t candidates, size_t inflight,
150          size_t completed, const std::string& checkname,
151          std::chrono::microseconds offset = std::chrono::microseconds{0})
152      {
153          const auto comment = m_testname + " " + checkname;
154          auto& runner = m_runner;
155          const auto now = m_now;
156          assert(offset.count() <= 0);
157          runner.actions.emplace_back(m_now, [=,&runner]() {
158              std::vector<std::pair<NodeId, GenTxid>> expired_now;
159              auto ret = runner.txrequest.GetRequestable(peer, now + offset, &expired_now);
160              for (const auto& entry : expired_now) runner.expired.insert(entry);
161              runner.txrequest.SanityCheck();
162              runner.txrequest.PostGetRequestableSanityCheck(now + offset);
163              size_t total = candidates + inflight + completed;
164              size_t real_total = runner.txrequest.Count(peer);
165              size_t real_candidates = runner.txrequest.CountCandidates(peer);
166              size_t real_inflight = runner.txrequest.CountInFlight(peer);
167              BOOST_CHECK_MESSAGE(real_total == total, strprintf("[" + comment + "] total %i (%i expected)", real_total, total));
168              BOOST_CHECK_MESSAGE(real_inflight == inflight, strprintf("[" + comment + "] inflight %i (%i expected)", real_inflight, inflight));
169              BOOST_CHECK_MESSAGE(real_candidates == candidates, strprintf("[" + comment + "] candidates %i (%i expected)", real_candidates, candidates));
170              BOOST_CHECK_MESSAGE(ret == expected, "[" + comment + "] mismatching requestables");
171          });
172      }
173  
174      /** Verify that an announcement for gtxid by peer has expired some time before this check is scheduled.
175       *
176       * Every expected expiration should be accounted for through exactly one call to this function.
177       */
178      void CheckExpired(NodeId peer, GenTxid gtxid)
179      {
180          const auto& testname = m_testname;
181          auto& runner = m_runner;
182          runner.actions.emplace_back(m_now, [=,&runner]() {
183              auto it = runner.expired.find(std::pair<NodeId, GenTxid>{peer, gtxid});
184              BOOST_CHECK_MESSAGE(it != runner.expired.end(), "[" + testname + "] missing expiration");
185              if (it != runner.expired.end()) runner.expired.erase(it);
186          });
187      }
188  
189      /** Generate a random txhash, whose priorities for certain peers are constrained.
190       *
191       * For example, NewTxHash({{p1,p2,p3},{p2,p4,p5}}) will generate a txhash T such that both:
192       *  - priority(p1,T) > priority(p2,T) > priority(p3,T)
193       *  - priority(p2,T) > priority(p4,T) > priority(p5,T)
194       * where priority is the predicted internal TxRequestTracker's priority, assuming all announcements
195       * are within the same preferredness class.
196       */
197      uint256 NewTxHash(const std::vector<std::vector<NodeId>>& orders = {})
198      {
199          uint256 ret;
200          bool ok;
201          do {
202              ret = InsecureRand256();
203              ok = true;
204              for (const auto& order : orders) {
205                  for (size_t pos = 1; pos < order.size(); ++pos) {
206                      uint64_t prio_prev = m_runner.txrequest.ComputePriority(ret, order[pos - 1], true);
207                      uint64_t prio_cur = m_runner.txrequest.ComputePriority(ret, order[pos], true);
208                      if (prio_prev <= prio_cur) {
209                          ok = false;
210                          break;
211                      }
212                  }
213                  if (!ok) break;
214              }
215              if (ok) {
216                  ok = m_runner.txhashset.insert(ret).second;
217              }
218          } while(!ok);
219          return ret;
220      }
221  
222      /** Generate a random GenTxid; the txhash follows NewTxHash; the is_wtxid flag is random. */
223      GenTxid NewGTxid(const std::vector<std::vector<NodeId>>& orders = {})
224      {
225          return InsecureRandBool() ? GenTxid::Wtxid(NewTxHash(orders)) : GenTxid::Txid(NewTxHash(orders));
226      }
227  
228      /** Generate a new random NodeId to use as peer. The same NodeId is never returned twice
229       *  (across all Scenarios combined). */
230      NodeId NewPeer()
231      {
232          bool ok;
233          NodeId ret;
234          do {
235              ret = InsecureRandBits(63);
236              ok = m_runner.peerset.insert(ret).second;
237          } while(!ok);
238          return ret;
239      }
240  
241      std::chrono::microseconds Now() const { return m_now; }
242  };
243  
244  /** Add to scenario a test with a single tx announced by a single peer.
245   *
246   * config is an integer in [0, 32), which controls which variant of the test is used.
247   */
248  void BuildSingleTest(Scenario& scenario, int config)
249  {
250      auto peer = scenario.NewPeer();
251      auto gtxid = scenario.NewGTxid();
252      bool immediate = config & 1;
253      bool preferred = config & 2;
254      auto delay = immediate ? NO_TIME : RandomTime8s();
255  
256      scenario.SetTestName(strprintf("Single(config=%i)", config));
257  
258      // Receive an announcement, either immediately requestable or delayed.
259      scenario.ReceivedInv(peer, gtxid, preferred, immediate ? MIN_TIME : scenario.Now() + delay);
260      if (immediate) {
261          scenario.Check(peer, {gtxid}, 1, 0, 0, "s1");
262      } else {
263          scenario.Check(peer, {}, 1, 0, 0, "s2");
264          scenario.AdvanceTime(delay - MICROSECOND);
265          scenario.Check(peer, {}, 1, 0, 0, "s3");
266          scenario.AdvanceTime(MICROSECOND);
267          scenario.Check(peer, {gtxid}, 1, 0, 0, "s4");
268      }
269  
270      if (config >> 3) { // We'll request the transaction
271          scenario.AdvanceTime(RandomTime8s());
272          auto expiry = RandomTime8s();
273          scenario.Check(peer, {gtxid}, 1, 0, 0, "s5");
274          scenario.RequestedTx(peer, gtxid.GetHash(), scenario.Now() + expiry);
275          scenario.Check(peer, {}, 0, 1, 0, "s6");
276  
277          if ((config >> 3) == 1) { // The request will time out
278              scenario.AdvanceTime(expiry - MICROSECOND);
279              scenario.Check(peer, {}, 0, 1, 0, "s7");
280              scenario.AdvanceTime(MICROSECOND);
281              scenario.Check(peer, {}, 0, 0, 0, "s8");
282              scenario.CheckExpired(peer, gtxid);
283              return;
284          } else {
285              scenario.AdvanceTime(std::chrono::microseconds{InsecureRandRange(expiry.count())});
286              scenario.Check(peer, {}, 0, 1, 0, "s9");
287              if ((config >> 3) == 3) { // A response will arrive for the transaction
288                  scenario.ReceivedResponse(peer, gtxid.GetHash());
289                  scenario.Check(peer, {}, 0, 0, 0, "s10");
290                  return;
291              }
292          }
293      }
294  
295      if (config & 4) { // The peer will go offline
296          scenario.DisconnectedPeer(peer);
297      } else { // The transaction is no longer needed
298          scenario.ForgetTxHash(gtxid.GetHash());
299      }
300      scenario.Check(peer, {}, 0, 0, 0, "s11");
301  }
302  
303  /** Add to scenario a test with a single tx announced by two peers, to verify the
304   *  right peer is selected for requests.
305   *
306   * config is an integer in [0, 32), which controls which variant of the test is used.
307   */
308  void BuildPriorityTest(Scenario& scenario, int config)
309  {
310      scenario.SetTestName(strprintf("Priority(config=%i)", config));
311  
312      // Two peers. They will announce in order {peer1, peer2}.
313      auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer();
314      // Construct a transaction that under random rules would be preferred by peer2 or peer1,
315      // depending on configuration.
316      bool prio1 = config & 1;
317      auto gtxid = prio1 ? scenario.NewGTxid({{peer1, peer2}}) : scenario.NewGTxid({{peer2, peer1}});
318      bool pref1 = config & 2, pref2 = config & 4;
319  
320      scenario.ReceivedInv(peer1, gtxid, pref1, MIN_TIME);
321      scenario.Check(peer1, {gtxid}, 1, 0, 0, "p1");
322      if (InsecureRandBool()) {
323          scenario.AdvanceTime(RandomTime8s());
324          scenario.Check(peer1, {gtxid}, 1, 0, 0, "p2");
325      }
326  
327      scenario.ReceivedInv(peer2, gtxid, pref2, MIN_TIME);
328      bool stage2_prio =
329          // At this point, peer2 will be given priority if:
330          // - It is preferred and peer1 is not
331          (pref2 && !pref1) ||
332          // - They're in the same preference class,
333          //   and the randomized priority favors peer2 over peer1.
334          (pref1 == pref2 && !prio1);
335      NodeId priopeer = stage2_prio ? peer2 : peer1, otherpeer = stage2_prio ? peer1 : peer2;
336      scenario.Check(otherpeer, {}, 1, 0, 0, "p3");
337      scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p4");
338      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
339      scenario.Check(otherpeer, {}, 1, 0, 0, "p5");
340      scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p6");
341  
342      // We possibly request from the selected peer.
343      if (config & 8) {
344          scenario.RequestedTx(priopeer, gtxid.GetHash(), MAX_TIME);
345          scenario.Check(priopeer, {}, 0, 1, 0, "p7");
346          scenario.Check(otherpeer, {}, 1, 0, 0, "p8");
347          if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
348      }
349  
350      // The peer which was selected (or requested from) now goes offline, or a NOTFOUND is received from them.
351      if (config & 16) {
352          scenario.DisconnectedPeer(priopeer);
353      } else {
354          scenario.ReceivedResponse(priopeer, gtxid.GetHash());
355      }
356      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
357      scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p8");
358      scenario.Check(otherpeer, {gtxid}, 1, 0, 0, "p9");
359      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
360  
361      // Now the other peer goes offline.
362      scenario.DisconnectedPeer(otherpeer);
363      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
364      scenario.Check(peer1, {}, 0, 0, 0, "p10");
365      scenario.Check(peer2, {}, 0, 0, 0, "p11");
366  }
367  
368  /** Add to scenario a randomized test in which N peers announce the same transaction, to verify
369   *  the order in which they are requested. */
370  void BuildBigPriorityTest(Scenario& scenario, int peers)
371  {
372      scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers));
373  
374      // We will have N peers announce the same transaction.
375      std::map<NodeId, bool> preferred;
376      std::vector<NodeId> pref_peers, npref_peers;
377      int num_pref = InsecureRandRange(peers + 1) ; // Some preferred, ...
378      int num_npref = peers - num_pref; // some not preferred.
379      for (int i = 0; i < num_pref; ++i) {
380          pref_peers.push_back(scenario.NewPeer());
381          preferred[pref_peers.back()] = true;
382      }
383      for (int i = 0; i < num_npref; ++i) {
384          npref_peers.push_back(scenario.NewPeer());
385          preferred[npref_peers.back()] = false;
386      }
387      // Make a list of all peers, in order of intended request order (concatenation of pref_peers and npref_peers).
388      std::vector<NodeId> request_order;
389      request_order.reserve(num_pref + num_npref);
390      for (int i = 0; i < num_pref; ++i) request_order.push_back(pref_peers[i]);
391      for (int i = 0; i < num_npref; ++i) request_order.push_back(npref_peers[i]);
392  
393      // Determine the announcement order randomly.
394      std::vector<NodeId> announce_order = request_order;
395      Shuffle(announce_order.begin(), announce_order.end(), g_insecure_rand_ctx);
396  
397      // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and
398      // within npref_peers.
399      auto gtxid = scenario.NewGTxid({pref_peers, npref_peers});
400  
401      // Decide reqtimes in opposite order of the expected request order. This means that as time passes we expect the
402      // to-be-requested-from-peer will change every time a subsequent reqtime is passed.
403      std::map<NodeId, std::chrono::microseconds> reqtimes;
404      auto reqtime = scenario.Now();
405      for (int i = peers - 1; i >= 0; --i) {
406          reqtime += RandomTime8s();
407          reqtimes[request_order[i]] = reqtime;
408      }
409  
410      // Actually announce from all peers simultaneously (but in announce_order).
411      for (const auto peer : announce_order) {
412          scenario.ReceivedInv(peer, gtxid, preferred[peer], reqtimes[peer]);
413      }
414      for (const auto peer : announce_order) {
415          scenario.Check(peer, {}, 1, 0, 0, "b1");
416      }
417  
418      // Let time pass and observe the to-be-requested-from peer change, from nonpreferred to preferred, and from
419      // high priority to low priority within each class.
420      for (int i = peers - 1; i >= 0; --i) {
421          scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() - MICROSECOND);
422          scenario.Check(request_order[i], {}, 1, 0, 0, "b2");
423          scenario.AdvanceTime(MICROSECOND);
424          scenario.Check(request_order[i], {gtxid}, 1, 0, 0, "b3");
425      }
426  
427      // Peers now in random order go offline, or send NOTFOUNDs. At every point in time the new to-be-requested-from
428      // peer should be the best remaining one, so verify this after every response.
429      for (int i = 0; i < peers; ++i) {
430          if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
431          const int pos = InsecureRandRange(request_order.size());
432          const auto peer = request_order[pos];
433          request_order.erase(request_order.begin() + pos);
434          if (InsecureRandBool()) {
435              scenario.DisconnectedPeer(peer);
436              scenario.Check(peer, {}, 0, 0, 0, "b4");
437          } else {
438              scenario.ReceivedResponse(peer, gtxid.GetHash());
439              scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5");
440          }
441          if (request_order.size()) {
442              scenario.Check(request_order[0], {gtxid}, 1, 0, 0, "b6");
443          }
444      }
445  
446      // Everything is gone in the end.
447      for (const auto peer : announce_order) {
448          scenario.Check(peer, {}, 0, 0, 0, "b7");
449      }
450  }
451  
452  /** Add to scenario a test with one peer announcing two transactions, to verify they are
453   *  fetched in announcement order.
454   *
455   *  config is an integer in [0, 4) inclusive, and selects the variant of the test.
456   */
457  void BuildRequestOrderTest(Scenario& scenario, int config)
458  {
459      scenario.SetTestName(strprintf("RequestOrder(config=%i)", config));
460  
461      auto peer = scenario.NewPeer();
462      auto gtxid1 = scenario.NewGTxid();
463      auto gtxid2 = scenario.NewGTxid();
464  
465      auto reqtime2 = scenario.Now() + RandomTime8s();
466      auto reqtime1 = reqtime2 + RandomTime8s();
467  
468      scenario.ReceivedInv(peer, gtxid1, config & 1, reqtime1);
469      // Simulate time going backwards by giving the second announcement an earlier reqtime.
470      scenario.ReceivedInv(peer, gtxid2, config & 2, reqtime2);
471  
472      scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now());
473      scenario.Check(peer, {}, 2, 0, 0, "o1");
474      scenario.AdvanceTime(MICROSECOND);
475      scenario.Check(peer, {gtxid2}, 2, 0, 0, "o2");
476      scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now());
477      scenario.Check(peer, {gtxid2}, 2, 0, 0, "o3");
478      scenario.AdvanceTime(MICROSECOND);
479      // Even with time going backwards in between announcements, the return value of GetRequestable is in
480      // announcement order.
481      scenario.Check(peer, {gtxid1, gtxid2}, 2, 0, 0, "o4");
482  
483      scenario.DisconnectedPeer(peer);
484      scenario.Check(peer, {}, 0, 0, 0, "o5");
485  }
486  
487  /** Add to scenario a test that verifies behavior related to both txid and wtxid with the same
488   *  hash being announced.
489   *
490   *  config is an integer in [0, 4) inclusive, and selects the variant of the test used.
491  */
492  void BuildWtxidTest(Scenario& scenario, int config)
493  {
494      scenario.SetTestName(strprintf("Wtxid(config=%i)", config));
495  
496      auto peerT = scenario.NewPeer();
497      auto peerW = scenario.NewPeer();
498      auto txhash = scenario.NewTxHash();
499      auto txid{GenTxid::Txid(txhash)};
500      auto wtxid{GenTxid::Wtxid(txhash)};
501  
502      auto reqtimeT = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s();
503      auto reqtimeW = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s();
504  
505      // Announce txid first or wtxid first.
506      if (config & 1) {
507          scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
508          if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
509          scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
510      } else {
511          scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
512          if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
513          scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
514      }
515  
516      // Let time pass if needed, and check that the preferred announcement (txid or wtxid)
517      // is correctly to-be-requested (and with the correct wtxidness).
518      auto max_reqtime = std::max(reqtimeT, reqtimeW);
519      if (max_reqtime > scenario.Now()) scenario.AdvanceTime(max_reqtime - scenario.Now());
520      if (config & 2) {
521          scenario.Check(peerT, {txid}, 1, 0, 0, "w1");
522          scenario.Check(peerW, {}, 1, 0, 0, "w2");
523      } else {
524          scenario.Check(peerT, {}, 1, 0, 0, "w3");
525          scenario.Check(peerW, {wtxid}, 1, 0, 0, "w4");
526      }
527  
528      // Let the preferred announcement be requested. It's not going to be delivered.
529      auto expiry = RandomTime8s();
530      if (config & 2) {
531          scenario.RequestedTx(peerT, txid.GetHash(), scenario.Now() + expiry);
532          scenario.Check(peerT, {}, 0, 1, 0, "w5");
533          scenario.Check(peerW, {}, 1, 0, 0, "w6");
534      } else {
535          scenario.RequestedTx(peerW, wtxid.GetHash(), scenario.Now() + expiry);
536          scenario.Check(peerT, {}, 1, 0, 0, "w7");
537          scenario.Check(peerW, {}, 0, 1, 0, "w8");
538      }
539  
540      // After reaching expiration time of the preferred announcement, verify that the
541      // remaining one is requestable
542      scenario.AdvanceTime(expiry);
543      if (config & 2) {
544          scenario.Check(peerT, {}, 0, 0, 1, "w9");
545          scenario.Check(peerW, {wtxid}, 1, 0, 0, "w10");
546          scenario.CheckExpired(peerT, txid);
547      } else {
548          scenario.Check(peerT, {txid}, 1, 0, 0, "w11");
549          scenario.Check(peerW, {}, 0, 0, 1, "w12");
550          scenario.CheckExpired(peerW, wtxid);
551      }
552  
553      // If a good transaction with either that hash as wtxid or txid arrives, both
554      // announcements are gone.
555      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
556      scenario.ForgetTxHash(txhash);
557      scenario.Check(peerT, {}, 0, 0, 0, "w13");
558      scenario.Check(peerW, {}, 0, 0, 0, "w14");
559  }
560  
561  /** Add to scenario a test that exercises clocks that go backwards. */
562  void BuildTimeBackwardsTest(Scenario& scenario)
563  {
564      auto peer1 = scenario.NewPeer();
565      auto peer2 = scenario.NewPeer();
566      auto gtxid = scenario.NewGTxid({{peer1, peer2}});
567  
568      // Announce from peer2.
569      auto reqtime = scenario.Now() + RandomTime8s();
570      scenario.ReceivedInv(peer2, gtxid, true, reqtime);
571      scenario.Check(peer2, {}, 1, 0, 0, "r1");
572      scenario.AdvanceTime(reqtime - scenario.Now());
573      scenario.Check(peer2, {gtxid}, 1, 0, 0, "r2");
574      // Check that if the clock goes backwards by 1us, the transaction would stop being requested.
575      scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND);
576      // But it reverts to being requested if time goes forward again.
577      scenario.Check(peer2, {gtxid}, 1, 0, 0, "r4");
578  
579      // Announce from peer1.
580      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
581      scenario.ReceivedInv(peer1, gtxid, true, MAX_TIME);
582      scenario.Check(peer2, {gtxid}, 1, 0, 0, "r5");
583      scenario.Check(peer1, {}, 1, 0, 0, "r6");
584  
585      // Request from peer1.
586      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
587      auto expiry = scenario.Now() + RandomTime8s();
588      scenario.RequestedTx(peer1, gtxid.GetHash(), expiry);
589      scenario.Check(peer1, {}, 0, 1, 0, "r7");
590      scenario.Check(peer2, {}, 1, 0, 0, "r8");
591  
592      // Expiration passes.
593      scenario.AdvanceTime(expiry - scenario.Now());
594      scenario.Check(peer1, {}, 0, 0, 1, "r9");
595      scenario.Check(peer2, {gtxid}, 1, 0, 0, "r10"); // Request goes back to peer2.
596      scenario.CheckExpired(peer1, gtxid);
597      scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND); // Going back does not unexpire.
598      scenario.Check(peer2, {gtxid}, 1, 0, 0, "r12", -MICROSECOND);
599  
600      // Peer2 goes offline, meaning no viable announcements remain.
601      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
602      scenario.DisconnectedPeer(peer2);
603      scenario.Check(peer1, {}, 0, 0, 0, "r13");
604      scenario.Check(peer2, {}, 0, 0, 0, "r14");
605  }
606  
607  /** Add to scenario a test that involves RequestedTx() calls for txhashes not returned by GetRequestable. */
608  void BuildWeirdRequestsTest(Scenario& scenario)
609  {
610      auto peer1 = scenario.NewPeer();
611      auto peer2 = scenario.NewPeer();
612      auto gtxid1 = scenario.NewGTxid({{peer1, peer2}});
613      auto gtxid2 = scenario.NewGTxid({{peer2, peer1}});
614  
615      // Announce gtxid1 by peer1.
616      scenario.ReceivedInv(peer1, gtxid1, true, MIN_TIME);
617      scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q1");
618  
619      // Announce gtxid2 by peer2.
620      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
621      scenario.ReceivedInv(peer2, gtxid2, true, MIN_TIME);
622      scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q2");
623      scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q3");
624  
625      // We request gtxid2 from *peer1* - no effect.
626      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
627      scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME);
628      scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q4");
629      scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q5");
630  
631      // Now request gtxid1 from peer1 - marks it as REQUESTED.
632      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
633      auto expiryA = scenario.Now() + RandomTime8s();
634      scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryA);
635      scenario.Check(peer1, {}, 0, 1, 0, "q6");
636      scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q7");
637  
638      // Request it a second time - nothing happens, as it's already REQUESTED.
639      auto expiryB = expiryA + RandomTime8s();
640      scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryB);
641      scenario.Check(peer1, {}, 0, 1, 0, "q8");
642      scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q9");
643  
644      // Also announce gtxid1 from peer2 now, so that the txhash isn't forgotten when the peer1 request expires.
645      scenario.ReceivedInv(peer2, gtxid1, true, MIN_TIME);
646      scenario.Check(peer1, {}, 0, 1, 0, "q10");
647      scenario.Check(peer2, {gtxid2}, 2, 0, 0, "q11");
648  
649      // When reaching expiryA, it expires (not expiryB, which is later).
650      scenario.AdvanceTime(expiryA - scenario.Now());
651      scenario.Check(peer1, {}, 0, 0, 1, "q12");
652      scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q13");
653      scenario.CheckExpired(peer1, gtxid1);
654  
655      // Requesting it yet again from peer1 doesn't do anything, as it's already COMPLETED.
656      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
657      scenario.RequestedTx(peer1, gtxid1.GetHash(), MAX_TIME);
658      scenario.Check(peer1, {}, 0, 0, 1, "q14");
659      scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q15");
660  
661      // Now announce gtxid2 from peer1.
662      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
663      scenario.ReceivedInv(peer1, gtxid2, true, MIN_TIME);
664      scenario.Check(peer1, {}, 1, 0, 1, "q16");
665      scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q17");
666  
667      // And request it from peer1 (weird as peer2 has the preference).
668      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
669      scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME);
670      scenario.Check(peer1, {}, 0, 1, 1, "q18");
671      scenario.Check(peer2, {gtxid1}, 2, 0, 0, "q19");
672  
673      // If peer2 now (normally) requests gtxid2, the existing request by peer1 becomes COMPLETED.
674      if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
675      scenario.RequestedTx(peer2, gtxid2.GetHash(), MAX_TIME);
676      scenario.Check(peer1, {}, 0, 0, 2, "q20");
677      scenario.Check(peer2, {gtxid1}, 1, 1, 0, "q21");
678  
679      // If peer2 goes offline, no viable announcements remain.
680      scenario.DisconnectedPeer(peer2);
681      scenario.Check(peer1, {}, 0, 0, 0, "q22");
682      scenario.Check(peer2, {}, 0, 0, 0, "q23");
683  }
684  
685  void TestInterleavedScenarios()
686  {
687      // Create a list of functions which add tests to scenarios.
688      std::vector<std::function<void(Scenario&)>> builders;
689      // Add instances of every test, for every configuration.
690      for (int n = 0; n < 64; ++n) {
691          builders.emplace_back([n](Scenario& scenario){ BuildWtxidTest(scenario, n); });
692          builders.emplace_back([n](Scenario& scenario){ BuildRequestOrderTest(scenario, n & 3); });
693          builders.emplace_back([n](Scenario& scenario){ BuildSingleTest(scenario, n & 31); });
694          builders.emplace_back([n](Scenario& scenario){ BuildPriorityTest(scenario, n & 31); });
695          builders.emplace_back([n](Scenario& scenario){ BuildBigPriorityTest(scenario, (n & 7) + 1); });
696          builders.emplace_back([](Scenario& scenario){ BuildTimeBackwardsTest(scenario); });
697          builders.emplace_back([](Scenario& scenario){ BuildWeirdRequestsTest(scenario); });
698      }
699      // Randomly shuffle all those functions.
700      Shuffle(builders.begin(), builders.end(), g_insecure_rand_ctx);
701  
702      Runner runner;
703      auto starttime = RandomTime1y();
704      // Construct many scenarios, and run (up to) 10 randomly-chosen tests consecutively in each.
705      while (builders.size()) {
706          // Introduce some variation in the start time of each scenario, so they don't all start off
707          // concurrently, but get a more random interleaving.
708          auto scenario_start = starttime + RandomTime8s() + RandomTime8s() + RandomTime8s();
709          Scenario scenario(runner, scenario_start);
710          for (int j = 0; builders.size() && j < 10; ++j) {
711              builders.back()(scenario);
712              builders.pop_back();
713          }
714      }
715      // Sort all the actions from all those scenarios chronologically, resulting in the actions from
716      // distinct scenarios to become interleaved. Use stable_sort so that actions from one scenario
717      // aren't reordered w.r.t. each other.
718      std::stable_sort(runner.actions.begin(), runner.actions.end(), [](const Action& a1, const Action& a2) {
719          return a1.first < a2.first;
720      });
721  
722      // Run all actions from all scenarios, in order.
723      for (auto& action : runner.actions) {
724          action.second();
725      }
726  
727      BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U);
728      BOOST_CHECK(runner.expired.empty());
729  }
730  
731  }  // namespace
732  
733  BOOST_AUTO_TEST_CASE(TxRequestTest)
734  {
735      for (int i = 0; i < 5; ++i) {
736          TestInterleavedScenarios();
737      }
738  }
739  
740  BOOST_AUTO_TEST_SUITE_END()