// txrequest_tests.cpp
1 // Copyright (c) 2020-present The Bitcoin Core developers 2 // Distributed under the MIT software license, see the accompanying 3 // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 5 6 #include <txrequest.h> 7 #include <uint256.h> 8 9 #include <test/util/random.h> 10 #include <test/util/setup_common.h> 11 12 #include <algorithm> 13 #include <functional> 14 #include <vector> 15 16 #include <boost/test/unit_test.hpp> 17 18 namespace { 19 20 class Scenario; 21 22 struct TxRequestTest : BasicTestingSetup { 23 std::chrono::microseconds RandomTime8s(); 24 std::chrono::microseconds RandomTime1y(); 25 void BuildSingleTest(Scenario& scenario, int config); 26 void BuildPriorityTest(Scenario& scenario, int config); 27 void BuildBigPriorityTest(Scenario& scenario, int peers); 28 void BuildRequestOrderTest(Scenario& scenario, int config); 29 void BuildWtxidTest(Scenario& scenario, int config); 30 void BuildTimeBackwardsTest(Scenario& scenario); 31 void BuildWeirdRequestsTest(Scenario& scenario); 32 void TestInterleavedScenarios(); 33 }; 34 35 constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min(); 36 constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max(); 37 constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1}; 38 constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0}; 39 40 /** An Action is a function to call at a particular (simulated) timestamp. */ 41 using Action = std::pair<std::chrono::microseconds, std::function<void()>>; 42 43 /** Object that stores actions from multiple interleaved scenarios, and data shared across them. 44 * 45 * The Scenario below is used to fill this. 46 */ 47 struct Runner 48 { 49 /** The TxRequestTracker being tested. */ 50 TxRequestTracker txrequest; 51 52 /** List of actions to be executed (in order of increasing timestamp). 
*/ 53 std::vector<Action> actions; 54 55 /** Which node ids have been assigned already (to prevent reuse). */ 56 std::set<NodeId> peerset; 57 58 /** Which txhashes have been assigned already (to prevent reuse). */ 59 std::set<uint256> txhashset; 60 61 /** Which (peer, gtxid) combinations are known to be expired. These need to be accumulated here instead of 62 * checked directly in the GetRequestable return value to avoid introducing a dependency between the various 63 * parallel tests. */ 64 std::multiset<std::pair<NodeId, GenTxid>> expired; 65 }; 66 67 std::chrono::microseconds TxRequestTest::RandomTime8s() { return std::chrono::microseconds{1 + m_rng.randbits(23)}; } 68 std::chrono::microseconds TxRequestTest::RandomTime1y() { return std::chrono::microseconds{1 + m_rng.randbits(45)}; } 69 70 /** A proxy for a Runner that helps build a sequence of consecutive test actions on a TxRequestTracker. 71 * 72 * Each Scenario is a proxy through which actions for the (sequential) execution of various tests are added to a 73 * Runner. The actions from multiple scenarios are then run concurrently, resulting in these tests being performed 74 * against a TxRequestTracker in parallel. Every test has its own unique txhashes and NodeIds which are not 75 * reused in other tests, and thus they should be independent from each other. Running them in parallel however 76 * means that we verify the behavior (w.r.t. one test's txhashes and NodeIds) even when the state of the data 77 * structure is more complicated due to the presence of other tests. 78 */ 79 class Scenario 80 { 81 FastRandomContext& m_rng; 82 Runner& m_runner; 83 std::chrono::microseconds m_now; 84 std::string m_testname; 85 86 public: 87 Scenario(FastRandomContext& rng, Runner& runner, std::chrono::microseconds starttime) : m_rng(rng), m_runner(runner), m_now(starttime) {} 88 89 /** Set a name for the current test, to give more clear error messages. 
*/ 90 void SetTestName(std::string testname) 91 { 92 m_testname = std::move(testname); 93 } 94 95 /** Advance this Scenario's time; this affects the timestamps newly scheduled events get. */ 96 void AdvanceTime(std::chrono::microseconds amount) 97 { 98 assert(amount.count() >= 0); 99 m_now += amount; 100 } 101 102 /** Schedule a ForgetTxHash call at the Scheduler's current time. */ 103 void ForgetTxHash(const uint256& txhash) 104 { 105 auto& runner = m_runner; 106 runner.actions.emplace_back(m_now, [=, &runner]() { 107 runner.txrequest.ForgetTxHash(txhash); 108 runner.txrequest.SanityCheck(); 109 }); 110 } 111 112 /** Schedule a ReceivedInv call at the Scheduler's current time. */ 113 void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool pref, std::chrono::microseconds reqtime) 114 { 115 auto& runner = m_runner; 116 runner.actions.emplace_back(m_now, [=, &runner]() { 117 runner.txrequest.ReceivedInv(peer, gtxid, pref, reqtime); 118 runner.txrequest.SanityCheck(); 119 }); 120 } 121 122 /** Schedule a DisconnectedPeer call at the Scheduler's current time. */ 123 void DisconnectedPeer(NodeId peer) 124 { 125 auto& runner = m_runner; 126 runner.actions.emplace_back(m_now, [=, &runner]() { 127 runner.txrequest.DisconnectedPeer(peer); 128 runner.txrequest.SanityCheck(); 129 }); 130 } 131 132 /** Schedule a RequestedTx call at the Scheduler's current time. */ 133 void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds exptime) 134 { 135 auto& runner = m_runner; 136 runner.actions.emplace_back(m_now, [=, &runner]() { 137 runner.txrequest.RequestedTx(peer, txhash, exptime); 138 runner.txrequest.SanityCheck(); 139 }); 140 } 141 142 /** Schedule a ReceivedResponse call at the Scheduler's current time. 
*/ 143 void ReceivedResponse(NodeId peer, const uint256& txhash) 144 { 145 auto& runner = m_runner; 146 runner.actions.emplace_back(m_now, [=, &runner]() { 147 runner.txrequest.ReceivedResponse(peer, txhash); 148 runner.txrequest.SanityCheck(); 149 }); 150 } 151 152 /** Schedule calls to verify the TxRequestTracker's state at the Scheduler's current time. 153 * 154 * @param peer The peer whose state will be inspected. 155 * @param expected The expected return value for GetRequestable(peer) 156 * @param candidates The expected return value CountCandidates(peer) 157 * @param inflight The expected return value CountInFlight(peer) 158 * @param completed The expected return value of Count(peer), minus candidates and inflight. 159 * @param checkname An arbitrary string to include in error messages, for test identificatrion. 160 * @param offset Offset with the current time to use (must be <= 0). This allows simulations of time going 161 * backwards (but note that the ordering of this event only follows the scenario's m_now. 
162 */ 163 void Check(NodeId peer, const std::vector<GenTxid>& expected, size_t candidates, size_t inflight, 164 size_t completed, const std::string& checkname, 165 std::chrono::microseconds offset = std::chrono::microseconds{0}) 166 { 167 const auto comment = m_testname + " " + checkname; 168 auto& runner = m_runner; 169 const auto now = m_now; 170 assert(offset.count() <= 0); 171 runner.actions.emplace_back(m_now, [=, &runner]() { 172 std::vector<std::pair<NodeId, GenTxid>> expired_now; 173 auto ret = runner.txrequest.GetRequestable(peer, now + offset, &expired_now); 174 for (const auto& entry : expired_now) { 175 runner.expired.insert(entry); 176 } 177 runner.txrequest.SanityCheck(); 178 runner.txrequest.PostGetRequestableSanityCheck(now + offset); 179 size_t total = candidates + inflight + completed; 180 size_t real_total = runner.txrequest.Count(peer); 181 size_t real_candidates = runner.txrequest.CountCandidates(peer); 182 size_t real_inflight = runner.txrequest.CountInFlight(peer); 183 BOOST_CHECK_MESSAGE(real_total == total, strprintf("[%s] total %i (%i expected)", comment, real_total, total)); 184 BOOST_CHECK_MESSAGE(real_inflight == inflight, strprintf("[%s] inflight %i (%i expected)", comment, real_inflight, inflight)); 185 BOOST_CHECK_MESSAGE(real_candidates == candidates, strprintf("[%s] candidates %i (%i expected)", comment, real_candidates, candidates)); 186 BOOST_CHECK_MESSAGE(ret == expected, strprintf("[%s] mismatching requestables", comment)); 187 }); 188 } 189 190 /** Verify that an announcement for gtxid by peer has expired some time before this check is scheduled. 191 * 192 * Every expected expiration should be accounted for through exactly one call to this function. 
193 */ 194 void CheckExpired(NodeId peer, GenTxid gtxid) 195 { 196 const auto& testname = m_testname; 197 auto& runner = m_runner; 198 runner.actions.emplace_back(m_now, [=, &runner]() { 199 auto it = runner.expired.find(std::pair<NodeId, GenTxid>{peer, gtxid}); 200 BOOST_CHECK_MESSAGE(it != runner.expired.end(), "[" + testname + "] missing expiration"); 201 if (it != runner.expired.end()) runner.expired.erase(it); 202 }); 203 } 204 205 /** Generate a random txhash, whose priorities for certain peers are constrained. 206 * 207 * For example, NewTxHash({{p1,p2,p3},{p2,p4,p5}}) will generate a txhash T such that both: 208 * - priority(p1,T) > priority(p2,T) > priority(p3,T) 209 * - priority(p2,T) > priority(p4,T) > priority(p5,T) 210 * where priority is the predicted internal TxRequestTracker's priority, assuming all announcements 211 * are within the same preferredness class. 212 */ 213 uint256 NewTxHash(const std::vector<std::vector<NodeId>>& orders = {}) 214 { 215 uint256 ret; 216 bool ok; 217 do { 218 ret = m_rng.rand256(); 219 ok = true; 220 for (const auto& order : orders) { 221 for (size_t pos = 1; pos < order.size(); ++pos) { 222 uint64_t prio_prev = m_runner.txrequest.ComputePriority(ret, order[pos - 1], true); 223 uint64_t prio_cur = m_runner.txrequest.ComputePriority(ret, order[pos], true); 224 if (prio_prev <= prio_cur) { 225 ok = false; 226 break; 227 } 228 } 229 if (!ok) break; 230 } 231 if (ok) { 232 ok = m_runner.txhashset.insert(ret).second; 233 } 234 } while(!ok); 235 return ret; 236 } 237 238 /** Generate a random GenTxid; the txhash follows NewTxHash; the transaction identifier is random. */ 239 GenTxid NewGTxid(const std::vector<std::vector<NodeId>>& orders = {}) 240 { 241 const uint256 txhash{NewTxHash(orders)}; 242 return m_rng.randbool() ? GenTxid{Wtxid::FromUint256(txhash)} : GenTxid{Txid::FromUint256(txhash)}; 243 } 244 245 /** Generate a new random NodeId to use as peer. 
The same NodeId is never returned twice 246 * (across all Scenarios combined). */ 247 NodeId NewPeer() 248 { 249 bool ok; 250 NodeId ret; 251 do { 252 ret = m_rng.randbits(63); 253 ok = m_runner.peerset.insert(ret).second; 254 } while(!ok); 255 return ret; 256 } 257 258 std::chrono::microseconds Now() const { return m_now; } 259 }; 260 261 /** Add to scenario a test with a single tx announced by a single peer. 262 * 263 * config is an integer in [0, 32), which controls which variant of the test is used. 264 */ 265 void TxRequestTest::BuildSingleTest(Scenario& scenario, int config) 266 { 267 auto peer = scenario.NewPeer(); 268 auto gtxid = scenario.NewGTxid(); 269 bool immediate = config & 1; 270 bool preferred = config & 2; 271 auto delay = immediate ? NO_TIME : RandomTime8s(); 272 273 scenario.SetTestName(strprintf("Single(config=%i)", config)); 274 275 // Receive an announcement, either immediately requestable or delayed. 276 scenario.ReceivedInv(peer, gtxid, preferred, immediate ? MIN_TIME : scenario.Now() + delay); 277 if (immediate) { 278 scenario.Check(peer, {gtxid}, 1, 0, 0, "s1"); 279 } else { 280 scenario.Check(peer, {}, 1, 0, 0, "s2"); 281 scenario.AdvanceTime(delay - MICROSECOND); 282 scenario.Check(peer, {}, 1, 0, 0, "s3"); 283 scenario.AdvanceTime(MICROSECOND); 284 scenario.Check(peer, {gtxid}, 1, 0, 0, "s4"); 285 } 286 287 if (config >> 3) { // We'll request the transaction 288 scenario.AdvanceTime(RandomTime8s()); 289 auto expiry = RandomTime8s(); 290 scenario.Check(peer, {gtxid}, 1, 0, 0, "s5"); 291 scenario.RequestedTx(peer, gtxid.ToUint256(), scenario.Now() + expiry); 292 scenario.Check(peer, {}, 0, 1, 0, "s6"); 293 294 if ((config >> 3) == 1) { // The request will time out 295 scenario.AdvanceTime(expiry - MICROSECOND); 296 scenario.Check(peer, {}, 0, 1, 0, "s7"); 297 scenario.AdvanceTime(MICROSECOND); 298 scenario.Check(peer, {}, 0, 0, 0, "s8"); 299 scenario.CheckExpired(peer, gtxid); 300 return; 301 } else { 302 
scenario.AdvanceTime(std::chrono::microseconds{m_rng.randrange(expiry.count())}); 303 scenario.Check(peer, {}, 0, 1, 0, "s9"); 304 if ((config >> 3) == 3) { // A response will arrive for the transaction 305 scenario.ReceivedResponse(peer, gtxid.ToUint256()); 306 scenario.Check(peer, {}, 0, 0, 0, "s10"); 307 return; 308 } 309 } 310 } 311 312 if (config & 4) { // The peer will go offline 313 scenario.DisconnectedPeer(peer); 314 } else { // The transaction is no longer needed 315 scenario.ForgetTxHash(gtxid.ToUint256()); 316 } 317 scenario.Check(peer, {}, 0, 0, 0, "s11"); 318 } 319 320 /** Add to scenario a test with a single tx announced by two peers, to verify the 321 * right peer is selected for requests. 322 * 323 * config is an integer in [0, 32), which controls which variant of the test is used. 324 */ 325 void TxRequestTest::BuildPriorityTest(Scenario& scenario, int config) 326 { 327 scenario.SetTestName(strprintf("Priority(config=%i)", config)); 328 329 // Two peers. They will announce in order {peer1, peer2}. 330 auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer(); 331 // Construct a transaction that under random rules would be preferred by peer2 or peer1, 332 // depending on configuration. 333 bool prio1 = config & 1; 334 auto gtxid = prio1 ? scenario.NewGTxid({{peer1, peer2}}) : scenario.NewGTxid({{peer2, peer1}}); 335 bool pref1 = config & 2, pref2 = config & 4; 336 337 scenario.ReceivedInv(peer1, gtxid, pref1, MIN_TIME); 338 scenario.Check(peer1, {gtxid}, 1, 0, 0, "p1"); 339 if (m_rng.randbool()) { 340 scenario.AdvanceTime(RandomTime8s()); 341 scenario.Check(peer1, {gtxid}, 1, 0, 0, "p2"); 342 } 343 344 scenario.ReceivedInv(peer2, gtxid, pref2, MIN_TIME); 345 bool stage2_prio = 346 // At this point, peer2 will be given priority if: 347 // - It is preferred and peer1 is not 348 (pref2 && !pref1) || 349 // - They're in the same preference class, 350 // and the randomized priority favors peer2 over peer1. 
351 (pref1 == pref2 && !prio1); 352 NodeId priopeer = stage2_prio ? peer2 : peer1, otherpeer = stage2_prio ? peer1 : peer2; 353 scenario.Check(otherpeer, {}, 1, 0, 0, "p3"); 354 scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p4"); 355 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 356 scenario.Check(otherpeer, {}, 1, 0, 0, "p5"); 357 scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p6"); 358 359 // We possibly request from the selected peer. 360 if (config & 8) { 361 scenario.RequestedTx(priopeer, gtxid.ToUint256(), MAX_TIME); 362 scenario.Check(priopeer, {}, 0, 1, 0, "p7"); 363 scenario.Check(otherpeer, {}, 1, 0, 0, "p8"); 364 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 365 } 366 367 // The peer which was selected (or requested from) now goes offline, or a NOTFOUND is received from them. 368 if (config & 16) { 369 scenario.DisconnectedPeer(priopeer); 370 } else { 371 scenario.ReceivedResponse(priopeer, gtxid.ToUint256()); 372 } 373 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 374 scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p8"); 375 scenario.Check(otherpeer, {gtxid}, 1, 0, 0, "p9"); 376 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 377 378 // Now the other peer goes offline. 379 scenario.DisconnectedPeer(otherpeer); 380 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 381 scenario.Check(peer1, {}, 0, 0, 0, "p10"); 382 scenario.Check(peer2, {}, 0, 0, 0, "p11"); 383 } 384 385 /** Add to scenario a randomized test in which N peers announce the same transaction, to verify 386 * the order in which they are requested. */ 387 void TxRequestTest::BuildBigPriorityTest(Scenario& scenario, int peers) 388 { 389 scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers)); 390 391 // We will have N peers announce the same transaction. 392 std::map<NodeId, bool> preferred; 393 std::vector<NodeId> pref_peers, npref_peers; 394 int num_pref = m_rng.randrange(peers + 1) ; // Some preferred, ... 
395 int num_npref = peers - num_pref; // some not preferred. 396 for (int i = 0; i < num_pref; ++i) { 397 pref_peers.push_back(scenario.NewPeer()); 398 preferred[pref_peers.back()] = true; 399 } 400 for (int i = 0; i < num_npref; ++i) { 401 npref_peers.push_back(scenario.NewPeer()); 402 preferred[npref_peers.back()] = false; 403 } 404 // Make a list of all peers, in order of intended request order (concatenation of pref_peers and npref_peers). 405 std::vector<NodeId> request_order; 406 request_order.reserve(num_pref + num_npref); 407 for (int i = 0; i < num_pref; ++i) request_order.push_back(pref_peers[i]); 408 for (int i = 0; i < num_npref; ++i) request_order.push_back(npref_peers[i]); 409 410 // Determine the announcement order randomly. 411 std::vector<NodeId> announce_order = request_order; 412 std::shuffle(announce_order.begin(), announce_order.end(), m_rng); 413 414 // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and 415 // within npref_peers. 416 auto gtxid = scenario.NewGTxid({pref_peers, npref_peers}); 417 418 // Decide reqtimes in opposite order of the expected request order. This means that as time passes we expect the 419 // to-be-requested-from-peer will change every time a subsequent reqtime is passed. 420 std::map<NodeId, std::chrono::microseconds> reqtimes; 421 auto reqtime = scenario.Now(); 422 for (int i = peers - 1; i >= 0; --i) { 423 reqtime += RandomTime8s(); 424 reqtimes[request_order[i]] = reqtime; 425 } 426 427 // Actually announce from all peers simultaneously (but in announce_order). 428 for (const auto peer : announce_order) { 429 scenario.ReceivedInv(peer, gtxid, preferred[peer], reqtimes[peer]); 430 } 431 for (const auto peer : announce_order) { 432 scenario.Check(peer, {}, 1, 0, 0, "b1"); 433 } 434 435 // Let time pass and observe the to-be-requested-from peer change, from nonpreferred to preferred, and from 436 // high priority to low priority within each class. 
437 for (int i = peers - 1; i >= 0; --i) { 438 scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() - MICROSECOND); 439 scenario.Check(request_order[i], {}, 1, 0, 0, "b2"); 440 scenario.AdvanceTime(MICROSECOND); 441 scenario.Check(request_order[i], {gtxid}, 1, 0, 0, "b3"); 442 } 443 444 // Peers now in random order go offline, or send NOTFOUNDs. At every point in time the new to-be-requested-from 445 // peer should be the best remaining one, so verify this after every response. 446 for (int i = 0; i < peers; ++i) { 447 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 448 const int pos = m_rng.randrange(request_order.size()); 449 const auto peer = request_order[pos]; 450 request_order.erase(request_order.begin() + pos); 451 if (m_rng.randbool()) { 452 scenario.DisconnectedPeer(peer); 453 scenario.Check(peer, {}, 0, 0, 0, "b4"); 454 } else { 455 scenario.ReceivedResponse(peer, gtxid.ToUint256()); 456 scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5"); 457 } 458 if (request_order.size()) { 459 scenario.Check(request_order[0], {gtxid}, 1, 0, 0, "b6"); 460 } 461 } 462 463 // Everything is gone in the end. 464 for (const auto peer : announce_order) { 465 scenario.Check(peer, {}, 0, 0, 0, "b7"); 466 } 467 } 468 469 /** Add to scenario a test with one peer announcing two transactions, to verify they are 470 * fetched in announcement order. 471 * 472 * config is an integer in [0, 4) inclusive, and selects the variant of the test. 
473 */ 474 void TxRequestTest::BuildRequestOrderTest(Scenario& scenario, int config) 475 { 476 scenario.SetTestName(strprintf("RequestOrder(config=%i)", config)); 477 478 auto peer = scenario.NewPeer(); 479 auto gtxid1 = scenario.NewGTxid(); 480 auto gtxid2 = scenario.NewGTxid(); 481 482 auto reqtime2 = scenario.Now() + RandomTime8s(); 483 auto reqtime1 = reqtime2 + RandomTime8s(); 484 485 scenario.ReceivedInv(peer, gtxid1, config & 1, reqtime1); 486 // Simulate time going backwards by giving the second announcement an earlier reqtime. 487 scenario.ReceivedInv(peer, gtxid2, config & 2, reqtime2); 488 489 scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now()); 490 scenario.Check(peer, {}, 2, 0, 0, "o1"); 491 scenario.AdvanceTime(MICROSECOND); 492 scenario.Check(peer, {gtxid2}, 2, 0, 0, "o2"); 493 scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now()); 494 scenario.Check(peer, {gtxid2}, 2, 0, 0, "o3"); 495 scenario.AdvanceTime(MICROSECOND); 496 // Even with time going backwards in between announcements, the return value of GetRequestable is in 497 // announcement order. 498 scenario.Check(peer, {gtxid1, gtxid2}, 2, 0, 0, "o4"); 499 500 scenario.DisconnectedPeer(peer); 501 scenario.Check(peer, {}, 0, 0, 0, "o5"); 502 } 503 504 /** Add to scenario a test that verifies behavior related to both txid and wtxid with the same 505 * hash being announced. 506 * 507 * config is an integer in [0, 4) inclusive, and selects the variant of the test used. 508 */ 509 void TxRequestTest::BuildWtxidTest(Scenario& scenario, int config) 510 { 511 scenario.SetTestName(strprintf("Wtxid(config=%i)", config)); 512 513 auto peerT = scenario.NewPeer(); 514 auto peerW = scenario.NewPeer(); 515 auto txhash = scenario.NewTxHash(); 516 auto txid{Txid::FromUint256(txhash)}; 517 auto wtxid{Wtxid::FromUint256(txhash)}; 518 519 auto reqtimeT = m_rng.randbool() ? MIN_TIME : scenario.Now() + RandomTime8s(); 520 auto reqtimeW = m_rng.randbool() ? 
MIN_TIME : scenario.Now() + RandomTime8s(); 521 522 // Announce txid first or wtxid first. 523 if (config & 1) { 524 scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT); 525 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 526 scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW); 527 } else { 528 scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW); 529 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 530 scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT); 531 } 532 533 // Let time pass if needed, and check that the preferred announcement (txid or wtxid) 534 // is correctly to-be-requested (and with the correct wtxidness). 535 auto max_reqtime = std::max(reqtimeT, reqtimeW); 536 if (max_reqtime > scenario.Now()) scenario.AdvanceTime(max_reqtime - scenario.Now()); 537 if (config & 2) { 538 scenario.Check(peerT, {txid}, 1, 0, 0, "w1"); 539 scenario.Check(peerW, {}, 1, 0, 0, "w2"); 540 } else { 541 scenario.Check(peerT, {}, 1, 0, 0, "w3"); 542 scenario.Check(peerW, {wtxid}, 1, 0, 0, "w4"); 543 } 544 545 // Let the preferred announcement be requested. It's not going to be delivered. 
546 auto expiry = RandomTime8s(); 547 if (config & 2) { 548 scenario.RequestedTx(peerT, txid.ToUint256(), scenario.Now() + expiry); 549 scenario.Check(peerT, {}, 0, 1, 0, "w5"); 550 scenario.Check(peerW, {}, 1, 0, 0, "w6"); 551 } else { 552 scenario.RequestedTx(peerW, wtxid.ToUint256(), scenario.Now() + expiry); 553 scenario.Check(peerT, {}, 1, 0, 0, "w7"); 554 scenario.Check(peerW, {}, 0, 1, 0, "w8"); 555 } 556 557 // After reaching expiration time of the preferred announcement, verify that the 558 // remaining one is requestable 559 scenario.AdvanceTime(expiry); 560 if (config & 2) { 561 scenario.Check(peerT, {}, 0, 0, 1, "w9"); 562 scenario.Check(peerW, {wtxid}, 1, 0, 0, "w10"); 563 scenario.CheckExpired(peerT, txid); 564 } else { 565 scenario.Check(peerT, {txid}, 1, 0, 0, "w11"); 566 scenario.Check(peerW, {}, 0, 0, 1, "w12"); 567 scenario.CheckExpired(peerW, wtxid); 568 } 569 570 // If a good transaction with either that hash as wtxid or txid arrives, both 571 // announcements are gone. 572 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 573 scenario.ForgetTxHash(txhash); 574 scenario.Check(peerT, {}, 0, 0, 0, "w13"); 575 scenario.Check(peerW, {}, 0, 0, 0, "w14"); 576 } 577 578 /** Add to scenario a test that exercises clocks that go backwards. */ 579 void TxRequestTest::BuildTimeBackwardsTest(Scenario& scenario) 580 { 581 auto peer1 = scenario.NewPeer(); 582 auto peer2 = scenario.NewPeer(); 583 auto gtxid = scenario.NewGTxid({{peer1, peer2}}); 584 585 // Announce from peer2. 586 auto reqtime = scenario.Now() + RandomTime8s(); 587 scenario.ReceivedInv(peer2, gtxid, true, reqtime); 588 scenario.Check(peer2, {}, 1, 0, 0, "r1"); 589 scenario.AdvanceTime(reqtime - scenario.Now()); 590 scenario.Check(peer2, {gtxid}, 1, 0, 0, "r2"); 591 // Check that if the clock goes backwards by 1us, the transaction would stop being requested. 592 scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND); 593 // But it reverts to being requested if time goes forward again. 
594 scenario.Check(peer2, {gtxid}, 1, 0, 0, "r4"); 595 596 // Announce from peer1. 597 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 598 scenario.ReceivedInv(peer1, gtxid, true, MAX_TIME); 599 scenario.Check(peer2, {gtxid}, 1, 0, 0, "r5"); 600 scenario.Check(peer1, {}, 1, 0, 0, "r6"); 601 602 // Request from peer1. 603 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 604 auto expiry = scenario.Now() + RandomTime8s(); 605 scenario.RequestedTx(peer1, gtxid.ToUint256(), expiry); 606 scenario.Check(peer1, {}, 0, 1, 0, "r7"); 607 scenario.Check(peer2, {}, 1, 0, 0, "r8"); 608 609 // Expiration passes. 610 scenario.AdvanceTime(expiry - scenario.Now()); 611 scenario.Check(peer1, {}, 0, 0, 1, "r9"); 612 scenario.Check(peer2, {gtxid}, 1, 0, 0, "r10"); // Request goes back to peer2. 613 scenario.CheckExpired(peer1, gtxid); 614 scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND); // Going back does not unexpire. 615 scenario.Check(peer2, {gtxid}, 1, 0, 0, "r12", -MICROSECOND); 616 617 // Peer2 goes offline, meaning no viable announcements remain. 618 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 619 scenario.DisconnectedPeer(peer2); 620 scenario.Check(peer1, {}, 0, 0, 0, "r13"); 621 scenario.Check(peer2, {}, 0, 0, 0, "r14"); 622 } 623 624 /** Add to scenario a test that involves RequestedTx() calls for txhashes not returned by GetRequestable. */ 625 void TxRequestTest::BuildWeirdRequestsTest(Scenario& scenario) 626 { 627 auto peer1 = scenario.NewPeer(); 628 auto peer2 = scenario.NewPeer(); 629 auto gtxid1 = scenario.NewGTxid({{peer1, peer2}}); 630 auto gtxid2 = scenario.NewGTxid({{peer2, peer1}}); 631 632 // Announce gtxid1 by peer1. 633 scenario.ReceivedInv(peer1, gtxid1, true, MIN_TIME); 634 scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q1"); 635 636 // Announce gtxid2 by peer2. 
637 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 638 scenario.ReceivedInv(peer2, gtxid2, true, MIN_TIME); 639 scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q2"); 640 scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q3"); 641 642 // We request gtxid2 from *peer1* - no effect. 643 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 644 scenario.RequestedTx(peer1, gtxid2.ToUint256(), MAX_TIME); 645 scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q4"); 646 scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q5"); 647 648 // Now request gtxid1 from peer1 - marks it as REQUESTED. 649 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 650 auto expiryA = scenario.Now() + RandomTime8s(); 651 scenario.RequestedTx(peer1, gtxid1.ToUint256(), expiryA); 652 scenario.Check(peer1, {}, 0, 1, 0, "q6"); 653 scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q7"); 654 655 // Request it a second time - nothing happens, as it's already REQUESTED. 656 auto expiryB = expiryA + RandomTime8s(); 657 scenario.RequestedTx(peer1, gtxid1.ToUint256(), expiryB); 658 scenario.Check(peer1, {}, 0, 1, 0, "q8"); 659 scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q9"); 660 661 // Also announce gtxid1 from peer2 now, so that the txhash isn't forgotten when the peer1 request expires. 662 scenario.ReceivedInv(peer2, gtxid1, true, MIN_TIME); 663 scenario.Check(peer1, {}, 0, 1, 0, "q10"); 664 scenario.Check(peer2, {gtxid2}, 2, 0, 0, "q11"); 665 666 // When reaching expiryA, it expires (not expiryB, which is later). 667 scenario.AdvanceTime(expiryA - scenario.Now()); 668 scenario.Check(peer1, {}, 0, 0, 1, "q12"); 669 scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q13"); 670 scenario.CheckExpired(peer1, gtxid1); 671 672 // Requesting it yet again from peer1 doesn't do anything, as it's already COMPLETED. 
673 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 674 scenario.RequestedTx(peer1, gtxid1.ToUint256(), MAX_TIME); 675 scenario.Check(peer1, {}, 0, 0, 1, "q14"); 676 scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q15"); 677 678 // Now announce gtxid2 from peer1. 679 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 680 scenario.ReceivedInv(peer1, gtxid2, true, MIN_TIME); 681 scenario.Check(peer1, {}, 1, 0, 1, "q16"); 682 scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q17"); 683 684 // And request it from peer1 (weird as peer2 has the preference). 685 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 686 scenario.RequestedTx(peer1, gtxid2.ToUint256(), MAX_TIME); 687 scenario.Check(peer1, {}, 0, 1, 1, "q18"); 688 scenario.Check(peer2, {gtxid1}, 2, 0, 0, "q19"); 689 690 // If peer2 now (normally) requests gtxid2, the existing request by peer1 becomes COMPLETED. 691 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s()); 692 scenario.RequestedTx(peer2, gtxid2.ToUint256(), MAX_TIME); 693 scenario.Check(peer1, {}, 0, 0, 2, "q20"); 694 scenario.Check(peer2, {gtxid1}, 1, 1, 0, "q21"); 695 696 // If peer2 goes offline, no viable announcements remain. 697 scenario.DisconnectedPeer(peer2); 698 scenario.Check(peer1, {}, 0, 0, 0, "q22"); 699 scenario.Check(peer2, {}, 0, 0, 0, "q23"); 700 } 701 702 void TxRequestTest::TestInterleavedScenarios() 703 { 704 // Create a list of functions which add tests to scenarios. 705 std::vector<std::function<void(Scenario&)>> builders; 706 // Add instances of every test, for every configuration. 
707 for (int n = 0; n < 64; ++n) { 708 builders.emplace_back([this, n](Scenario& scenario) { BuildWtxidTest(scenario, n); }); 709 builders.emplace_back([this, n](Scenario& scenario) { BuildRequestOrderTest(scenario, n & 3); }); 710 builders.emplace_back([this, n](Scenario& scenario) { BuildSingleTest(scenario, n & 31); }); 711 builders.emplace_back([this, n](Scenario& scenario) { BuildPriorityTest(scenario, n & 31); }); 712 builders.emplace_back([this, n](Scenario& scenario) { BuildBigPriorityTest(scenario, (n & 7) + 1); }); 713 builders.emplace_back([this](Scenario& scenario) { BuildTimeBackwardsTest(scenario); }); 714 builders.emplace_back([this](Scenario& scenario) { BuildWeirdRequestsTest(scenario); }); 715 } 716 // Randomly shuffle all those functions. 717 std::shuffle(builders.begin(), builders.end(), m_rng); 718 719 Runner runner; 720 auto starttime = RandomTime1y(); 721 // Construct many scenarios, and run (up to) 10 randomly-chosen tests consecutively in each. 722 while (builders.size()) { 723 // Introduce some variation in the start time of each scenario, so they don't all start off 724 // concurrently, but get a more random interleaving. 725 auto scenario_start = starttime + RandomTime8s() + RandomTime8s() + RandomTime8s(); 726 Scenario scenario(m_rng, runner, scenario_start); 727 for (int j = 0; builders.size() && j < 10; ++j) { 728 builders.back()(scenario); 729 builders.pop_back(); 730 } 731 } 732 // Sort all the actions from all those scenarios chronologically, resulting in the actions from 733 // distinct scenarios to become interleaved. Use stable_sort so that actions from one scenario 734 // aren't reordered w.r.t. each other. 735 std::stable_sort(runner.actions.begin(), runner.actions.end(), [](const Action& a1, const Action& a2) { 736 return a1.first < a2.first; 737 }); 738 739 // Run all actions from all scenarios, in order. 
740 for (auto& action : runner.actions) { 741 action.second(); 742 } 743 744 BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U); 745 BOOST_CHECK(runner.expired.empty()); 746 } 747 748 } // namespace 749 750 BOOST_FIXTURE_TEST_SUITE(txrequest_tests, TxRequestTest) 751 752 BOOST_AUTO_TEST_CASE(TxRequestTest) 753 { 754 for (int i = 0; i < 5; ++i) { 755 TestInterleavedScenarios(); 756 } 757 } 758 759 BOOST_AUTO_TEST_SUITE_END()