// base.cpp
1 // Copyright (c) 2017-present The Bitcoin Core developers 2 // Distributed under the MIT software license, see the accompanying 3 // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 5 #include <index/base.h> 6 7 #include <chain.h> 8 #include <common/args.h> 9 #include <dbwrapper.h> 10 #include <interfaces/chain.h> 11 #include <interfaces/types.h> 12 #include <kernel/types.h> 13 #include <node/abort.h> 14 #include <node/blockstorage.h> 15 #include <node/context.h> 16 #include <node/database_args.h> 17 #include <node/interface_ui.h> 18 #include <primitives/block.h> 19 #include <sync.h> 20 #include <tinyformat.h> 21 #include <uint256.h> 22 #include <undo.h> 23 #include <util/fs.h> 24 #include <util/log.h> 25 #include <util/string.h> 26 #include <util/thread.h> 27 #include <util/threadinterrupt.h> 28 #include <util/time.h> 29 #include <util/translation.h> 30 #include <validation.h> 31 #include <validationinterface.h> 32 33 #include <cassert> 34 #include <compare> 35 #include <cstdint> 36 #include <memory> 37 #include <optional> 38 #include <span> 39 #include <stdexcept> 40 #include <string> 41 #include <thread> 42 #include <utility> 43 #include <vector> 44 45 using kernel::ChainstateRole; 46 47 constexpr uint8_t DB_BEST_BLOCK{'B'}; 48 49 constexpr auto SYNC_LOG_INTERVAL{30s}; 50 constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s}; 51 52 template <typename... Args> 53 void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... 
args) 54 { 55 auto message = tfm::format(fmt, args...); 56 node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get()); 57 } 58 59 CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash) 60 { 61 CBlockLocator locator; 62 bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator)); 63 assert(found); 64 assert(!locator.IsNull()); 65 return locator; 66 } 67 68 BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) : 69 CDBWrapper{DBParams{ 70 .path = path, 71 .cache_bytes = n_cache_size, 72 .memory_only = f_memory, 73 .wipe_data = f_wipe, 74 .obfuscate = f_obfuscate, 75 .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}} 76 {} 77 78 CBlockLocator BaseIndex::DB::ReadBestBlock() const 79 { 80 CBlockLocator locator; 81 82 bool success = Read(DB_BEST_BLOCK, locator); 83 if (!success) { 84 locator.SetNull(); 85 } 86 87 return locator; 88 } 89 90 void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator) 91 { 92 batch.Write(DB_BEST_BLOCK, locator); 93 } 94 95 BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name) 96 : m_chain{std::move(chain)}, m_name{std::move(name)} {} 97 98 BaseIndex::~BaseIndex() 99 { 100 Interrupt(); 101 Stop(); 102 } 103 104 bool BaseIndex::Init() 105 { 106 AssertLockNotHeld(cs_main); 107 108 // May need reset if index is being restarted. 109 m_interrupt.reset(); 110 111 // m_chainstate member gives indexing code access to node internals. It is 112 // removed in followup https://github.com/bitcoin/bitcoin/pull/24230 113 m_chainstate = WITH_LOCK(::cs_main, 114 return &m_chain->context()->chainman->ValidatedChainstate()); 115 // Register to validation interface before setting the 'm_synced' flag, so that 116 // callbacks are not missed once m_synced is true. 
117 m_chain->context()->validation_signals->RegisterValidationInterface(this); 118 119 const auto locator{GetDB().ReadBestBlock()}; 120 121 LOCK(cs_main); 122 CChain& index_chain = m_chainstate->m_chain; 123 124 if (locator.IsNull()) { 125 SetBestBlockIndex(nullptr); 126 } else { 127 // Setting the best block to the locator's top block. If it is not part of the 128 // best chain, we will rewind to the fork point during index sync 129 const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))}; 130 if (!locator_index) { 131 return InitError(Untranslated(strprintf("best block of %s not found. Please rebuild the index.", GetName()))); 132 } 133 SetBestBlockIndex(locator_index); 134 } 135 136 // Child init 137 const CBlockIndex* start_block = m_best_block_index.load(); 138 if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) { 139 return false; 140 } 141 142 // Note: this will latch to true immediately if the user starts up with an empty 143 // datadir and an index enabled. If this is the case, indexation will happen solely 144 // via `BlockConnected` signals until, possibly, the next restart. 145 m_synced = start_block == index_chain.Tip(); 146 m_init = true; 147 return true; 148 } 149 150 static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main) 151 { 152 AssertLockHeld(cs_main); 153 154 if (!pindex_prev) { 155 return chain.Genesis(); 156 } 157 158 const CBlockIndex* pindex = chain.Next(pindex_prev); 159 if (pindex) { 160 return pindex; 161 } 162 163 // Since block is not in the chain, return the next block in the chain AFTER the last common ancestor. 164 // Caller will be responsible for rewinding back to the common ancestor. 
165 return chain.Next(chain.FindFork(pindex_prev)); 166 } 167 168 bool BaseIndex::ProcessBlock(const CBlockIndex* pindex, const CBlock* block_data) 169 { 170 interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block_data); 171 172 CBlock block; 173 if (!block_data) { // disk lookup if block data wasn't provided 174 if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) { 175 FatalErrorf("Failed to read block %s from disk", 176 pindex->GetBlockHash().ToString()); 177 return false; 178 } 179 block_info.data = █ 180 } 181 182 CBlockUndo block_undo; 183 if (CustomOptions().connect_undo_data) { 184 if (pindex->nHeight > 0 && !m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { 185 FatalErrorf("Failed to read undo block data %s from disk", 186 pindex->GetBlockHash().ToString()); 187 return false; 188 } 189 block_info.undo_data = &block_undo; 190 } 191 192 if (!CustomAppend(block_info)) { 193 FatalErrorf("Failed to write block %s to index database", 194 pindex->GetBlockHash().ToString()); 195 return false; 196 } 197 198 return true; 199 } 200 201 void BaseIndex::Sync() 202 { 203 const CBlockIndex* pindex = m_best_block_index.load(); 204 if (!m_synced) { 205 auto last_log_time{NodeClock::now()}; 206 auto last_locator_write_time{last_log_time}; 207 while (true) { 208 if (m_interrupt) { 209 LogInfo("%s: m_interrupt set; exiting ThreadSync", GetName()); 210 211 SetBestBlockIndex(pindex); 212 // No need to handle errors in Commit. If it fails, the error will be already be 213 // logged. The best way to recover is to continue, as index cannot be corrupted by 214 // a missed commit to disk for an advanced index state. 215 Commit(); 216 return; 217 } 218 219 const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain)); 220 // If pindex_next is null, it means pindex is the chain tip, so 221 // commit data indexed so far. 
222 if (!pindex_next) { 223 SetBestBlockIndex(pindex); 224 // No need to handle errors in Commit. See rationale above. 225 Commit(); 226 227 // If pindex is still the chain tip after committing, exit the 228 // sync loop. It is important for cs_main to be locked while 229 // setting m_synced = true, otherwise a new block could be 230 // attached while m_synced is still false, and it would not be 231 // indexed. 232 LOCK(::cs_main); 233 pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain); 234 if (!pindex_next) { 235 m_synced = true; 236 break; 237 } 238 } 239 if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) { 240 FatalErrorf("Failed to rewind %s to a previous chain tip", GetName()); 241 return; 242 } 243 pindex = pindex_next; 244 245 246 if (!ProcessBlock(pindex)) return; // error logged internally 247 248 auto current_time{NodeClock::now()}; 249 if (current_time - last_log_time >= SYNC_LOG_INTERVAL) { 250 LogInfo("Syncing %s with block chain from height %d", GetName(), pindex->nHeight); 251 last_log_time = current_time; 252 } 253 254 if (current_time - last_locator_write_time >= SYNC_LOCATOR_WRITE_INTERVAL) { 255 SetBestBlockIndex(pindex); 256 last_locator_write_time = current_time; 257 // No need to handle errors in Commit. See rationale above. 258 Commit(); 259 } 260 } 261 } 262 263 if (pindex) { 264 LogInfo("%s is enabled at height %d", GetName(), pindex->nHeight); 265 } else { 266 LogInfo("%s is enabled", GetName()); 267 } 268 } 269 270 bool BaseIndex::Commit() 271 { 272 // Don't commit anything if we haven't indexed any block yet 273 // (this could happen if init is interrupted). 
274 bool ok = m_best_block_index != nullptr; 275 if (ok) { 276 CDBBatch batch(GetDB()); 277 ok = CustomCommit(batch); 278 if (ok) { 279 GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash())); 280 GetDB().WriteBatch(batch); 281 } 282 } 283 if (!ok) { 284 LogError("Failed to commit latest %s state", GetName()); 285 return false; 286 } 287 return true; 288 } 289 290 bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip) 291 { 292 assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip); 293 294 CBlock block; 295 CBlockUndo block_undo; 296 297 for (const CBlockIndex* iter_tip = current_tip; iter_tip != new_tip; iter_tip = iter_tip->pprev) { 298 interfaces::BlockInfo block_info = kernel::MakeBlockInfo(iter_tip); 299 if (CustomOptions().disconnect_data) { 300 if (!m_chainstate->m_blockman.ReadBlock(block, *iter_tip)) { 301 LogError("Failed to read block %s from disk", 302 iter_tip->GetBlockHash().ToString()); 303 return false; 304 } 305 block_info.data = █ 306 } 307 if (CustomOptions().disconnect_undo_data && iter_tip->nHeight > 0) { 308 if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *iter_tip)) { 309 return false; 310 } 311 block_info.undo_data = &block_undo; 312 } 313 if (!CustomRemove(block_info)) { 314 return false; 315 } 316 } 317 318 // Don't commit here - the committed index state must never be ahead of the 319 // flushed chainstate, otherwise unclean restarts would lead to index corruption. 320 // Pruning has a minimum of 288 blocks-to-keep and getting the index 321 // out of sync may be possible but a users fault. 
322 // In case we reorg beyond the pruned depth, ReadBlock would 323 // throw and lead to a graceful shutdown 324 SetBestBlockIndex(new_tip); 325 return true; 326 } 327 328 void BaseIndex::BlockConnected(const ChainstateRole& role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) 329 { 330 // Ignore events from not fully validated chains to avoid out-of-order indexing. 331 // 332 // TODO at some point we could parameterize whether a particular index can be 333 // built out of order, but for now just do the conservative simple thing. 334 if (!role.validated) { 335 return; 336 } 337 338 // Ignore BlockConnected signals until we have fully indexed the chain. 339 if (!m_synced) { 340 return; 341 } 342 343 const CBlockIndex* best_block_index = m_best_block_index.load(); 344 if (!best_block_index) { 345 if (pindex->nHeight != 0) { 346 FatalErrorf("First block connected is not the genesis block (height=%d)", 347 pindex->nHeight); 348 return; 349 } 350 } else { 351 // Ensure block connects to an ancestor of the current best block. This should be the case 352 // most of the time, but may not be immediately after the sync thread catches up and sets 353 // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are 354 // in the ValidationInterface queue backlog even after the sync thread has caught up to the 355 // new chain tip. In this unlikely event, log a warning and let the queue clear. 
356 if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) { 357 LogWarning("Block %s does not connect to an ancestor of " 358 "known best chain (tip=%s); not updating index", 359 pindex->GetBlockHash().ToString(), 360 best_block_index->GetBlockHash().ToString()); 361 return; 362 } 363 if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) { 364 FatalErrorf("Failed to rewind %s to a previous chain tip", 365 GetName()); 366 return; 367 } 368 } 369 370 // Dispatch block to child class; errors are logged internally and abort the node. 371 if (ProcessBlock(pindex, block.get())) { 372 // Setting the best block index is intentionally the last step of this 373 // function, so BlockUntilSyncedToCurrentChain callers waiting for the 374 // best block index to be updated can rely on the block being fully 375 // processed, and the index object being safe to delete. 376 SetBestBlockIndex(pindex); 377 } 378 } 379 380 void BaseIndex::ChainStateFlushed(const ChainstateRole& role, const CBlockLocator& locator) 381 { 382 // Ignore events from not fully validated chains to avoid out-of-order indexing. 383 if (!role.validated) { 384 return; 385 } 386 387 if (!m_synced) { 388 return; 389 } 390 391 const uint256& locator_tip_hash = locator.vHave.front(); 392 const CBlockIndex* locator_tip_index; 393 { 394 LOCK(cs_main); 395 locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash); 396 } 397 398 if (!locator_tip_index) { 399 FatalErrorf("First block (hash=%s) in locator was not found", 400 locator_tip_hash.ToString()); 401 return; 402 } 403 404 // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail 405 // immediately after the sync thread catches up and sets m_synced. Consider the case where 406 // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue 407 // backlog even after the sync thread has caught up to the new chain tip. 
In this unlikely 408 // event, log a warning and let the queue clear. 409 const CBlockIndex* best_block_index = m_best_block_index.load(); 410 if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) { 411 LogWarning("Locator contains block (hash=%s) not on known best " 412 "chain (tip=%s); not writing index locator", 413 locator_tip_hash.ToString(), 414 best_block_index->GetBlockHash().ToString()); 415 return; 416 } 417 418 // No need to handle errors in Commit. If it fails, the error will be already be logged. The 419 // best way to recover is to continue, as index cannot be corrupted by a missed commit to disk 420 // for an advanced index state. 421 Commit(); 422 } 423 424 bool BaseIndex::BlockUntilSyncedToCurrentChain() const 425 { 426 AssertLockNotHeld(cs_main); 427 428 if (!m_synced) { 429 return false; 430 } 431 432 { 433 // Skip the queue-draining stuff if we know we're caught up with 434 // m_chain.Tip(). 435 LOCK(cs_main); 436 const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip(); 437 const CBlockIndex* best_block_index = m_best_block_index.load(); 438 if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) { 439 return true; 440 } 441 } 442 443 LogInfo("%s is catching up on block notifications", GetName()); 444 m_chain->context()->validation_signals->SyncWithValidationInterfaceQueue(); 445 return true; 446 } 447 448 void BaseIndex::Interrupt() 449 { 450 m_interrupt(); 451 } 452 453 bool BaseIndex::StartBackgroundSync() 454 { 455 if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index"); 456 457 m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); }); 458 return true; 459 } 460 461 void BaseIndex::Stop() 462 { 463 if (m_chain->context()->validation_signals) { 464 m_chain->context()->validation_signals->UnregisterValidationInterface(this); 465 } 466 467 if (m_thread_sync.joinable()) { 468 m_thread_sync.join(); 469 } 470 } 471 472 IndexSummary BaseIndex::GetSummary() 
const 473 { 474 IndexSummary summary{}; 475 summary.name = GetName(); 476 summary.synced = m_synced; 477 if (const auto& pindex = m_best_block_index.load()) { 478 summary.best_block_height = pindex->nHeight; 479 summary.best_block_hash = pindex->GetBlockHash(); 480 } else { 481 summary.best_block_height = 0; 482 summary.best_block_hash = m_chain->getBlockHash(0); 483 } 484 return summary; 485 } 486 487 void BaseIndex::SetBestBlockIndex(const CBlockIndex* block) 488 { 489 assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune()); 490 491 if (AllowPrune() && block) { 492 node::PruneLockInfo prune_lock; 493 prune_lock.height_first = block->nHeight; 494 WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock)); 495 } 496 497 // Intentionally set m_best_block_index as the last step in this function, 498 // after updating prune locks above, and after making any other references 499 // to *this, so the BlockUntilSyncedToCurrentChain function (which checks 500 // m_best_block_index as an optimization) can be used to wait for the last 501 // BlockConnected notification and safely assume that prune locks are 502 // updated and that the index object is safe to delete. 503 m_best_block_index = block; 504 }