// checkqueue_tests.cpp
// Copyright (c) 2012-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <checkqueue.h>
#include <common/args.h>
#include <sync.h>
#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <util/chaintype.h>
#include <util/time.h>

#include <boost/test/unit_test.hpp>

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <unordered_set>
#include <utility>
#include <vector>

/**
 * Identical to TestingSetup but excludes lock contention logging if
 * `DEBUG_LOCKCONTENTION` is defined, as some of these tests are designed to be
 * heavily contested to trigger race conditions or other issues.
 */
struct NoLockLoggingTestingSetup : public TestingSetup {
    NoLockLoggingTestingSetup()
#ifdef DEBUG_LOCKCONTENTION
        : TestingSetup{ChainType::MAIN, /*extra_args=*/{"-debugexclude=lock"}} {}
#else
        : TestingSetup{ChainType::MAIN} {}
#endif
};

BOOST_FIXTURE_TEST_SUITE(checkqueue_tests, NoLockLoggingTestingSetup)

// Batch size handed to each CCheckQueue, and the number of worker threads.
static const unsigned int QUEUE_BATCH_SIZE = 128;
static const int SCRIPT_CHECK_THREADS = 3;

/** Trivial check that always succeeds and records nothing. */
struct FakeCheck {
    bool operator()() const
    {
        return true;
    }
};

/** Check that always succeeds but counts how many times it was executed,
 * so tests can verify that every queued check ran. */
struct FakeCheckCheckCompletion {
    static std::atomic<size_t> n_calls;
    bool operator()()
    {
        // Relaxed is sufficient: only the final tally is inspected, after Wait().
        n_calls.fetch_add(1, std::memory_order_relaxed);
        return true;
    }
};

/** Check whose result is chosen at construction time; used to verify that a
 * single failing check fails the whole batch. */
struct FailingCheck {
    bool fails;
    // NOTE(review): intentionally non-explicit — tests rely on implicit
    // bool -> FailingCheck conversion (e.g. vChecks.resize(100, false)).
    FailingCheck(bool _fails) : fails(_fails){};
    bool operator()() const
    {
        return !fails;
    }
};

/** Check that records its id into a shared multiset, so tests can verify
 * every check was executed exactly once (no duplicates, none skipped). */
struct UniqueCheck {
    static Mutex m;
    static std::unordered_multiset<size_t> results GUARDED_BY(m);
    size_t check_id;
    UniqueCheck(size_t check_id_in) : check_id(check_id_in){};
    bool operator()()
    {
        LOCK(m);
        results.insert(check_id);
        return true;
    }
};
80 81 struct MemoryCheck { 82 static std::atomic<size_t> fake_allocated_memory; 83 bool b {false}; 84 bool operator()() const 85 { 86 return true; 87 } 88 MemoryCheck(const MemoryCheck& x) 89 { 90 // We have to do this to make sure that destructor calls are paired 91 // 92 // Really, copy constructor should be deletable, but CCheckQueue breaks 93 // if it is deleted because of internal push_back. 94 fake_allocated_memory.fetch_add(b, std::memory_order_relaxed); 95 }; 96 MemoryCheck(bool b_) : b(b_) 97 { 98 fake_allocated_memory.fetch_add(b, std::memory_order_relaxed); 99 }; 100 ~MemoryCheck() 101 { 102 fake_allocated_memory.fetch_sub(b, std::memory_order_relaxed); 103 }; 104 }; 105 106 struct FrozenCleanupCheck { 107 static std::atomic<uint64_t> nFrozen; 108 static std::condition_variable cv; 109 static std::mutex m; 110 bool should_freeze{true}; 111 bool operator()() const 112 { 113 return true; 114 } 115 FrozenCleanupCheck() = default; 116 ~FrozenCleanupCheck() 117 { 118 if (should_freeze) { 119 std::unique_lock<std::mutex> l(m); 120 nFrozen.store(1, std::memory_order_relaxed); 121 cv.notify_one(); 122 cv.wait(l, []{ return nFrozen.load(std::memory_order_relaxed) == 0;}); 123 } 124 } 125 FrozenCleanupCheck(FrozenCleanupCheck&& other) noexcept 126 { 127 should_freeze = other.should_freeze; 128 other.should_freeze = false; 129 } 130 FrozenCleanupCheck& operator=(FrozenCleanupCheck&& other) noexcept 131 { 132 should_freeze = other.should_freeze; 133 other.should_freeze = false; 134 return *this; 135 } 136 }; 137 138 // Static Allocations 139 std::mutex FrozenCleanupCheck::m{}; 140 std::atomic<uint64_t> FrozenCleanupCheck::nFrozen{0}; 141 std::condition_variable FrozenCleanupCheck::cv{}; 142 Mutex UniqueCheck::m; 143 std::unordered_multiset<size_t> UniqueCheck::results; 144 std::atomic<size_t> FakeCheckCheckCompletion::n_calls{0}; 145 std::atomic<size_t> MemoryCheck::fake_allocated_memory{0}; 146 147 // Queue Typedefs 148 typedef CCheckQueue<FakeCheckCheckCompletion> 
Correct_Queue; 149 typedef CCheckQueue<FakeCheck> Standard_Queue; 150 typedef CCheckQueue<FailingCheck> Failing_Queue; 151 typedef CCheckQueue<UniqueCheck> Unique_Queue; 152 typedef CCheckQueue<MemoryCheck> Memory_Queue; 153 typedef CCheckQueue<FrozenCleanupCheck> FrozenCleanup_Queue; 154 155 156 /** This test case checks that the CCheckQueue works properly 157 * with each specified size_t Checks pushed. 158 */ 159 static void Correct_Queue_range(std::vector<size_t> range) 160 { 161 auto small_queue = std::make_unique<Correct_Queue>(QUEUE_BATCH_SIZE, SCRIPT_CHECK_THREADS); 162 // Make vChecks here to save on malloc (this test can be slow...) 163 std::vector<FakeCheckCheckCompletion> vChecks; 164 vChecks.reserve(9); 165 for (const size_t i : range) { 166 size_t total = i; 167 FakeCheckCheckCompletion::n_calls = 0; 168 CCheckQueueControl<FakeCheckCheckCompletion> control(small_queue.get()); 169 while (total) { 170 vChecks.clear(); 171 vChecks.resize(std::min<size_t>(total, InsecureRandRange(10))); 172 total -= vChecks.size(); 173 control.Add(std::move(vChecks)); 174 } 175 BOOST_REQUIRE(control.Wait()); 176 BOOST_REQUIRE_EQUAL(FakeCheckCheckCompletion::n_calls, i); 177 } 178 } 179 180 /** Test that 0 checks is correct 181 */ 182 BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Zero) 183 { 184 std::vector<size_t> range; 185 range.push_back(size_t{0}); 186 Correct_Queue_range(range); 187 } 188 /** Test that 1 check is correct 189 */ 190 BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_One) 191 { 192 std::vector<size_t> range; 193 range.push_back(size_t{1}); 194 Correct_Queue_range(range); 195 } 196 /** Test that MAX check is correct 197 */ 198 BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Max) 199 { 200 std::vector<size_t> range; 201 range.push_back(100000); 202 Correct_Queue_range(range); 203 } 204 /** Test that random numbers of checks are correct 205 */ 206 BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Random) 207 { 208 std::vector<size_t> range; 209 
range.reserve(100000/1000); 210 for (size_t i = 2; i < 100000; i += std::max((size_t)1, (size_t)InsecureRandRange(std::min((size_t)1000, ((size_t)100000) - i)))) 211 range.push_back(i); 212 Correct_Queue_range(range); 213 } 214 215 216 /** Test that failing checks are caught */ 217 BOOST_AUTO_TEST_CASE(test_CheckQueue_Catches_Failure) 218 { 219 auto fail_queue = std::make_unique<Failing_Queue>(QUEUE_BATCH_SIZE, SCRIPT_CHECK_THREADS); 220 for (size_t i = 0; i < 1001; ++i) { 221 CCheckQueueControl<FailingCheck> control(fail_queue.get()); 222 size_t remaining = i; 223 while (remaining) { 224 size_t r = InsecureRandRange(10); 225 226 std::vector<FailingCheck> vChecks; 227 vChecks.reserve(r); 228 for (size_t k = 0; k < r && remaining; k++, remaining--) 229 vChecks.emplace_back(remaining == 1); 230 control.Add(std::move(vChecks)); 231 } 232 bool success = control.Wait(); 233 if (i > 0) { 234 BOOST_REQUIRE(!success); 235 } else if (i == 0) { 236 BOOST_REQUIRE(success); 237 } 238 } 239 } 240 // Test that a block validation which fails does not interfere with 241 // future blocks, ie, the bad state is cleared. 242 BOOST_AUTO_TEST_CASE(test_CheckQueue_Recovers_From_Failure) 243 { 244 auto fail_queue = std::make_unique<Failing_Queue>(QUEUE_BATCH_SIZE, SCRIPT_CHECK_THREADS); 245 for (auto times = 0; times < 10; ++times) { 246 for (const bool end_fails : {true, false}) { 247 CCheckQueueControl<FailingCheck> control(fail_queue.get()); 248 { 249 std::vector<FailingCheck> vChecks; 250 vChecks.resize(100, false); 251 vChecks[99] = end_fails; 252 control.Add(std::move(vChecks)); 253 } 254 bool r =control.Wait(); 255 BOOST_REQUIRE(r != end_fails); 256 } 257 } 258 } 259 260 // Test that unique checks are actually all called individually, rather than 261 // just one check being called repeatedly. 
Test that checks are not called 262 // more than once as well 263 BOOST_AUTO_TEST_CASE(test_CheckQueue_UniqueCheck) 264 { 265 auto queue = std::make_unique<Unique_Queue>(QUEUE_BATCH_SIZE, SCRIPT_CHECK_THREADS); 266 size_t COUNT = 100000; 267 size_t total = COUNT; 268 { 269 CCheckQueueControl<UniqueCheck> control(queue.get()); 270 while (total) { 271 size_t r = InsecureRandRange(10); 272 std::vector<UniqueCheck> vChecks; 273 for (size_t k = 0; k < r && total; k++) 274 vChecks.emplace_back(--total); 275 control.Add(std::move(vChecks)); 276 } 277 } 278 { 279 LOCK(UniqueCheck::m); 280 bool r = true; 281 BOOST_REQUIRE_EQUAL(UniqueCheck::results.size(), COUNT); 282 for (size_t i = 0; i < COUNT; ++i) { 283 r = r && UniqueCheck::results.count(i) == 1; 284 } 285 BOOST_REQUIRE(r); 286 } 287 } 288 289 290 // Test that blocks which might allocate lots of memory free their memory aggressively. 291 // 292 // This test attempts to catch a pathological case where by lazily freeing 293 // checks might mean leaving a check un-swapped out, and decreasing by 1 each 294 // time could leave the data hanging across a sequence of blocks. 
/** Verifies that after each simulated block, every MemoryCheck instance the
 * queue ever held has been destructed: the fake allocation counter must be
 * back to zero. */
BOOST_AUTO_TEST_CASE(test_CheckQueue_Memory)
{
    auto queue = std::make_unique<Memory_Queue>(QUEUE_BATCH_SIZE, SCRIPT_CHECK_THREADS);
    for (size_t i = 0; i < 1000; ++i) {
        size_t total = i;
        {
            CCheckQueueControl<MemoryCheck> control(queue.get());
            while (total) {
                size_t r = InsecureRandRange(10);
                std::vector<MemoryCheck> vChecks;
                for (size_t k = 0; k < r && total; k++) {
                    total--;
                    // Each iteration leaves data at the front, back, and middle
                    // to catch any sort of deallocation failure
                    vChecks.emplace_back(total == 0 || total == i || total == i/2);
                }
                control.Add(std::move(vChecks));
            }
        }
        // Control went out of scope: no "memory" may still be accounted.
        BOOST_REQUIRE_EQUAL(MemoryCheck::fake_allocated_memory, 0U);
    }
}

// Test that a new verification cannot occur until all checks
// have been destructed
BOOST_AUTO_TEST_CASE(test_CheckQueue_FrozenCleanup)
{
    auto queue = std::make_unique<FrozenCleanup_Queue>(QUEUE_BATCH_SIZE, SCRIPT_CHECK_THREADS);
    bool fails = false;
    // Worker thread: pushes one freezing check and waits for completion. Its
    // Wait() blocks until the check's destructor is unfrozen below.
    std::thread t0([&]() {
        CCheckQueueControl<FrozenCleanupCheck> control(queue.get());
        std::vector<FrozenCleanupCheck> vChecks(1);
        control.Add(std::move(vChecks));
        bool waitResult = control.Wait(); // Hangs here
        assert(waitResult);
    });
    {
        std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
        // Wait until the queue has finished all jobs and frozen
        FrozenCleanupCheck::cv.wait(l, [](){return FrozenCleanupCheck::nFrozen == 1;});
    }
    // Try to get control of the queue a bunch of times
    // (every try_lock must fail while the destructor is frozen — a success
    // would mean a new verification round could start too early)
    for (auto x = 0; x < 100 && !fails; ++x) {
        fails = queue->m_control_mutex.try_lock();
    }
    {
        // Unfreeze (we need the lock in case of spurious wakeup)
        std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
        FrozenCleanupCheck::nFrozen = 0;
    }
    // Awaken frozen destructor
    FrozenCleanupCheck::cv.notify_one();
    // Wait for control to finish
    t0.join();
    BOOST_REQUIRE(!fails);
}


/** Test that CCheckQueueControl is threadsafe */
BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks)
{
    auto queue = std::make_unique<Standard_Queue>(QUEUE_BATCH_SIZE, SCRIPT_CHECK_THREADS);
    {
        // Part 1: three threads race to construct a control; the control's
        // mutex must serialize them, so each thread observes the counter
        // unchanged across its sleep.
        std::vector<std::thread> tg;
        std::atomic<int> nThreads {0};
        std::atomic<int> fails {0};
        for (size_t i = 0; i < 3; ++i) {
            tg.emplace_back(
                [&]{
                    CCheckQueueControl<FakeCheck> control(queue.get());
                    // While sleeping, no other thread should execute to this point
                    auto observed = ++nThreads;
                    UninterruptibleSleep(std::chrono::milliseconds{10});
                    fails += observed != nThreads;
                });
        }
        for (auto& thread: tg) {
            if (thread.joinable()) thread.join();
        }
        BOOST_REQUIRE_EQUAL(fails, 0);
    }
    {
        // Part 2: while one thread holds a control, try_lock on the control
        // mutex must always fail. A cv handshake sequences the two threads:
        // has_lock -> has_tried -> done -> done_ack.
        std::vector<std::thread> tg;
        std::mutex m;
        std::condition_variable cv;
        bool has_lock{false};
        bool has_tried{false};
        bool done{false};
        bool done_ack{false};
        {
            std::unique_lock<std::mutex> l(m);
            tg.emplace_back([&]{
                CCheckQueueControl<FakeCheck> control(queue.get());
                std::unique_lock<std::mutex> ll(m);
                has_lock = true;
                cv.notify_one();
                cv.wait(ll, [&]{return has_tried;});
                done = true;
                cv.notify_one();
                // Wait until the done is acknowledged
                //
                cv.wait(ll, [&]{return done_ack;});
            });
            // Wait for thread to get the lock
            cv.wait(l, [&](){return has_lock;});
            bool fails = false;
            for (auto x = 0; x < 100 && !fails; ++x) {
                fails = queue->m_control_mutex.try_lock();
            }
            has_tried = true;
            cv.notify_one();
            cv.wait(l, [&](){return done;});
            // Acknowledge the done
            done_ack = true;
            cv.notify_one();
            BOOST_REQUIRE(!fails);
        }
        for (auto& thread: tg) {
            if (thread.joinable()) thread.join();
        }
    }
}
BOOST_AUTO_TEST_SUITE_END()